Merge pull request #17172 from ziglang/ip-structs

compiler: move struct types into InternPool proper
This commit is contained in:
Andrew Kelley
2023-09-22 09:38:41 -07:00
committed by GitHub
41 changed files with 3630 additions and 2900 deletions
+24 -7
View File
@@ -4758,6 +4758,9 @@ fn structDeclInner(
.known_non_opv = false,
.known_comptime_only = false,
.is_tuple = false,
.any_comptime_fields = false,
.any_default_inits = false,
.any_aligned_fields = false,
});
return indexToRef(decl_inst);
}
@@ -4881,6 +4884,9 @@ fn structDeclInner(
var known_non_opv = false;
var known_comptime_only = false;
var any_comptime_fields = false;
var any_aligned_fields = false;
var any_default_inits = false;
for (container_decl.ast.members) |member_node| {
var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
@@ -4910,13 +4916,13 @@ fn structDeclInner(
const have_value = member.ast.value_expr != 0;
const is_comptime = member.comptime_token != null;
if (is_comptime and layout == .Packed) {
return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{});
} else if (is_comptime and layout == .Extern) {
return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{});
}
if (!is_comptime) {
if (is_comptime) {
switch (layout) {
.Packed => return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{}),
.Extern => return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{}),
.Auto => any_comptime_fields = true,
}
} else {
known_non_opv = known_non_opv or
nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
known_comptime_only = known_comptime_only or
@@ -4942,6 +4948,7 @@ fn structDeclInner(
if (layout == .Packed) {
try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct fields", .{});
}
any_aligned_fields = true;
const align_ref = try expr(&block_scope, &namespace.base, coerced_align_ri, member.ast.align_expr);
if (!block_scope.endsWithNoReturn()) {
_ = try block_scope.addBreak(.break_inline, decl_inst, align_ref);
@@ -4955,6 +4962,7 @@ fn structDeclInner(
}
if (have_value) {
any_default_inits = true;
const ri: ResultInfo = .{ .rl = if (field_type == .none) .none else .{ .coerced_ty = field_type } };
const default_inst = try expr(&block_scope, &namespace.base, ri, member.ast.value_expr);
@@ -4982,6 +4990,9 @@ fn structDeclInner(
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
.is_tuple = is_tuple,
.any_comptime_fields = any_comptime_fields,
.any_default_inits = any_default_inits,
.any_aligned_fields = any_aligned_fields,
});
wip_members.finishBits(bits_per_field);
@@ -12080,6 +12091,9 @@ const GenZir = struct {
known_non_opv: bool,
known_comptime_only: bool,
is_tuple: bool,
any_comptime_fields: bool,
any_default_inits: bool,
any_aligned_fields: bool,
}) !void {
const astgen = gz.astgen;
const gpa = astgen.gpa;
@@ -12117,6 +12131,9 @@ const GenZir = struct {
.is_tuple = args.is_tuple,
.name_strategy = gz.anon_name_strategy,
.layout = args.layout,
.any_comptime_fields = args.any_comptime_fields,
.any_default_inits = args.any_default_inits,
.any_aligned_fields = args.any_aligned_fields,
}),
.operand = payload_index,
} },
+911 -195
View File
@@ -1,7 +1,7 @@
//! All interned objects have both a value and a type.
//! This data structure is self-contained, with the following exceptions:
//! * type_struct via Module.Struct.Index
//! * type_opaque via Module.Namespace.Index and Module.Decl.Index
//! * Module.Namespace has a pointer to Module.File
//! * Module.Decl has a pointer to Module.CaptureScope
/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are
/// constructed lazily.
@@ -39,17 +39,11 @@ allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{},
/// Same pattern as with `decls_free_list`.
namespaces_free_list: std.ArrayListUnmanaged(Module.Namespace.Index) = .{},
/// Struct objects are stored in this data structure because:
/// * They contain pointers such as the field maps.
/// * They need to be mutated after creation.
allocated_structs: std.SegmentedList(Module.Struct, 0) = .{},
/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack.
structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{},
/// Some types such as enums, structs, and unions need to store mappings from field names
/// to field index, or value to field index. In such cases, they will store the underlying
/// field names and values directly, relying on one of these maps, stored separately,
/// to provide lookup.
/// These are not serialized; it is computed upon deserialization.
maps: std.ArrayListUnmanaged(FieldMap) = .{},
/// Used for finding the index inside `string_bytes`.
@@ -365,11 +359,291 @@ pub const Key = union(enum) {
namespace: Module.Namespace.Index,
};
pub const StructType = extern struct {
/// The `none` tag is used to represent a struct with no fields.
index: Module.Struct.OptionalIndex,
/// May be `none` if the struct has no declarations.
/// Although packed structs and non-packed structs are encoded differently,
/// this struct is used for both categories since they share some common
/// functionality.
pub const StructType = struct {
extra_index: u32,
/// `none` when the struct is `@TypeOf(.{})`.
decl: Module.Decl.OptionalIndex,
/// `none` when the struct has no declarations.
namespace: Module.Namespace.OptionalIndex,
/// Index of the struct_decl ZIR instruction.
zir_index: Zir.Inst.Index,
layout: std.builtin.Type.ContainerLayout,
field_names: NullTerminatedString.Slice,
field_types: Index.Slice,
field_inits: Index.Slice,
field_aligns: Alignment.Slice,
runtime_order: RuntimeOrder.Slice,
comptime_bits: ComptimeBits,
offsets: Offsets,
names_map: OptionalMapIndex,
pub const ComptimeBits = struct {
start: u32,
/// This is the number of u32 elements, not the number of struct fields.
len: u32,
pub fn get(this: @This(), ip: *const InternPool) []u32 {
return ip.extra.items[this.start..][0..this.len];
}
pub fn getBit(this: @This(), ip: *const InternPool, i: usize) bool {
if (this.len == 0) return false;
return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0;
}
pub fn setBit(this: @This(), ip: *const InternPool, i: usize) void {
this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32);
}
pub fn clearBit(this: @This(), ip: *const InternPool, i: usize) void {
this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32));
}
};
pub const Offsets = struct {
start: u32,
len: u32,
pub fn get(this: @This(), ip: *const InternPool) []u32 {
return @ptrCast(ip.extra.items[this.start..][0..this.len]);
}
};
pub const RuntimeOrder = enum(u32) {
/// Placeholder until layout is resolved.
unresolved = std.math.maxInt(u32) - 0,
/// Field not present at runtime
omitted = std.math.maxInt(u32) - 1,
_,
pub const Slice = struct {
start: u32,
len: u32,
pub fn get(slice: Slice, ip: *const InternPool) []RuntimeOrder {
return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
}
};
pub fn toInt(i: @This()) ?u32 {
return switch (i) {
.omitted => null,
.unresolved => unreachable,
else => @intFromEnum(i),
};
}
};
/// Look up field index based on field name.
pub fn nameIndex(self: StructType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const names_map = self.names_map.unwrap() orelse {
const i = name.toUnsigned(ip) orelse return null;
if (i >= self.field_types.len) return null;
return i;
};
const map = &ip.maps.items[@intFromEnum(names_map)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.field_names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @intCast(field_index);
}
/// Returns the already-existing field with the same name, if any.
pub fn addFieldName(
self: @This(),
ip: *InternPool,
name: NullTerminatedString,
) ?u32 {
return ip.addFieldName(self.names_map.unwrap().?, self.field_names.start, name);
}
pub fn fieldAlign(s: @This(), ip: *const InternPool, i: usize) Alignment {
if (s.field_aligns.len == 0) return .none;
return s.field_aligns.get(ip)[i];
}
pub fn fieldInit(s: @This(), ip: *const InternPool, i: usize) Index {
if (s.field_inits.len == 0) return .none;
return s.field_inits.get(ip)[i];
}
/// Returns `none` in the case the struct is a tuple.
pub fn fieldName(s: @This(), ip: *const InternPool, i: usize) OptionalNullTerminatedString {
if (s.field_names.len == 0) return .none;
return s.field_names.get(ip)[i].toOptional();
}
pub fn fieldIsComptime(s: @This(), ip: *const InternPool, i: usize) bool {
return s.comptime_bits.getBit(ip, i);
}
pub fn setFieldComptime(s: @This(), ip: *InternPool, i: usize) void {
s.comptime_bits.setBit(ip, i);
}
/// Reads the non-opv flag calculated during AstGen. Used to short-circuit more
/// complicated logic.
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
.Packed => false,
.Auto, .Extern => s.flagsPtr(ip).known_non_opv,
};
}
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn flagsPtr(self: @This(), ip: *InternPool) *Tag.TypeStruct.Flags {
assert(self.layout != .Packed);
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
}
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) {
flags_ptr.assumed_runtime_bits = true;
return true;
}
return false;
}
pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.field_types_wip) return true;
flags_ptr.field_types_wip = true;
return false;
}
pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).field_types_wip = false;
}
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.layout_wip) return true;
flags_ptr.layout_wip = true;
return false;
}
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).layout_wip = false;
}
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return false;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.alignment_wip) return true;
flags_ptr.alignment_wip = true;
return false;
}
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
if (s.layout == .Packed) return;
s.flagsPtr(ip).alignment_wip = false;
}
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
if (s.layout == .Packed) return true;
const flags_ptr = s.flagsPtr(ip);
if (flags_ptr.fully_resolved) return true;
flags_ptr.fully_resolved = true;
return false;
}
pub fn clearFullyResolved(s: @This(), ip: *InternPool) void {
s.flagsPtr(ip).fully_resolved = false;
}
/// The returned pointer expires with any addition to the `InternPool`.
/// Asserts the struct is not packed.
pub fn size(self: @This(), ip: *InternPool) *u32 {
assert(self.layout != .Packed);
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
}
/// The backing integer type of the packed struct. Whether zig chooses
/// this type or the user specifies it, it is stored here. This will be
/// set to `none` until the layout is resolved.
/// Asserts the struct is packed.
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
assert(s.layout == .Packed);
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
}
/// Asserts the struct is not packed.
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: Zir.Inst.Index) void {
assert(s.layout != .Packed);
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
ip.extra.items[s.extra_index + field_index] = new_zir_index;
}
pub fn haveFieldTypes(s: @This(), ip: *const InternPool) bool {
const types = s.field_types.get(ip);
return types.len == 0 or types[0] != .none;
}
pub fn haveLayout(s: @This(), ip: *InternPool) bool {
return switch (s.layout) {
.Packed => s.backingIntType(ip).* != .none,
.Auto, .Extern => s.flagsPtr(ip).layout_resolved,
};
}
pub fn isTuple(s: @This(), ip: *InternPool) bool {
return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
}
pub fn hasReorderedFields(s: @This()) bool {
return s.layout == .Auto;
}
pub const RuntimeOrderIterator = struct {
ip: *InternPool,
field_index: u32,
struct_type: InternPool.Key.StructType,
pub fn next(it: *@This()) ?u32 {
var i = it.field_index;
if (i >= it.struct_type.field_types.len)
return null;
if (it.struct_type.hasReorderedFields()) {
it.field_index += 1;
return it.struct_type.runtime_order.get(it.ip)[i].toInt();
}
while (it.struct_type.fieldIsComptime(it.ip, i)) {
i += 1;
if (i >= it.struct_type.field_types.len)
return null;
}
it.field_index = i + 1;
return i;
}
};
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
/// May or may not include zero-bit fields.
/// Asserts the struct is not packed.
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
assert(s.layout != .Packed);
return .{
.ip = ip,
.field_index = 0,
.struct_type = s,
};
}
};
pub const AnonStructType = struct {
@@ -382,6 +656,17 @@ pub const Key = union(enum) {
pub fn isTuple(self: AnonStructType) bool {
return self.names.len == 0;
}
pub fn fieldName(
self: AnonStructType,
ip: *const InternPool,
index: u32,
) OptionalNullTerminatedString {
if (self.names.len == 0)
return .none;
return self.names.get(ip)[index].toOptional();
}
};
/// Serves two purposes:
@@ -870,7 +1155,6 @@ pub const Key = union(enum) {
.simple_type,
.simple_value,
.opt,
.struct_type,
.undef,
.err,
.enum_literal,
@@ -893,6 +1177,7 @@ pub const Key = union(enum) {
.enum_type,
.variable,
.union_type,
.struct_type,
=> |x| Hash.hash(seed, asBytes(&x.decl)),
.int => |int| {
@@ -969,11 +1254,11 @@ pub const Key = union(enum) {
if (child == .u8_type) {
switch (aggregate.storage) {
.bytes => |bytes| for (bytes[0..@as(usize, @intCast(len))]) |byte| {
.bytes => |bytes| for (bytes[0..@intCast(len)]) |byte| {
std.hash.autoHash(&hasher, KeyTag.int);
std.hash.autoHash(&hasher, byte);
},
.elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem| {
.elems => |elems| for (elems[0..@intCast(len)]) |elem| {
const elem_key = ip.indexToKey(elem);
std.hash.autoHash(&hasher, @as(KeyTag, elem_key));
switch (elem_key) {
@@ -1123,10 +1408,6 @@ pub const Key = union(enum) {
const b_info = b.opt;
return std.meta.eql(a_info, b_info);
},
.struct_type => |a_info| {
const b_info = b.struct_type;
return std.meta.eql(a_info, b_info);
},
.un => |a_info| {
const b_info = b.un;
return std.meta.eql(a_info, b_info);
@@ -1298,6 +1579,10 @@ pub const Key = union(enum) {
const b_info = b.union_type;
return a_info.decl == b_info.decl;
},
.struct_type => |a_info| {
const b_info = b.struct_type;
return a_info.decl == b_info.decl;
},
.aggregate => |a_info| {
const b_info = b.aggregate;
if (a_info.ty != b_info.ty) return false;
@@ -1433,6 +1718,8 @@ pub const Key = union(enum) {
}
};
pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
// Unlike `Tag.TypeUnion` which is an encoding, and `Key.UnionType` which is a
// minimal hashmap key, this type is a convenience type that contains info
// needed by semantic analysis.
@@ -1474,8 +1761,6 @@ pub const UnionType = struct {
}
};
pub const RequiresComptime = enum(u2) { no, yes, unknown, wip };
pub const Status = enum(u3) {
none,
field_types_wip,
@@ -1814,9 +2099,11 @@ pub const Index = enum(u32) {
type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit,
simple_type: struct { data: SimpleType },
type_opaque: struct { data: *Key.OpaqueType },
type_struct: struct { data: Module.Struct.OptionalIndex },
type_struct: struct { data: *Tag.TypeStruct },
type_struct_ns: struct { data: Module.Namespace.Index },
type_struct_anon: DataIsExtraIndexOfTypeStructAnon,
type_struct_packed: struct { data: *Tag.TypeStructPacked },
type_struct_packed_inits: struct { data: *Tag.TypeStructPacked },
type_tuple_anon: DataIsExtraIndexOfTypeStructAnon,
type_union: struct { data: *Tag.TypeUnion },
type_function: struct {
@@ -2241,17 +2528,22 @@ pub const Tag = enum(u8) {
/// An opaque type.
/// data is index of Key.OpaqueType in extra.
type_opaque,
/// A struct type.
/// data is Module.Struct.OptionalIndex
/// The `none` tag is used to represent `@TypeOf(.{})`.
/// A non-packed struct type.
/// data is 0 or extra index of `TypeStruct`.
/// data == 0 represents `@TypeOf(.{})`.
type_struct,
/// A struct type that has only a namespace; no fields, and there is no
/// Module.Struct object allocated for it.
/// A non-packed struct type that has only a namespace; no fields.
/// data is Module.Namespace.Index.
type_struct_ns,
/// An AnonStructType which stores types, names, and values for fields.
/// data is extra index of `TypeStructAnon`.
type_struct_anon,
/// A packed struct, no fields have any init values.
/// data is extra index of `TypeStructPacked`.
type_struct_packed,
/// A packed struct, one or more fields have init values.
/// data is extra index of `TypeStructPacked`.
type_struct_packed_inits,
/// An AnonStructType which has only types and values for fields.
/// data is extra index of `TypeStructAnon`.
type_tuple_anon,
@@ -2461,9 +2753,10 @@ pub const Tag = enum(u8) {
.type_enum_nonexhaustive => EnumExplicit,
.simple_type => unreachable,
.type_opaque => OpaqueType,
.type_struct => unreachable,
.type_struct => TypeStruct,
.type_struct_ns => unreachable,
.type_struct_anon => TypeStructAnon,
.type_struct_packed, .type_struct_packed_inits => TypeStructPacked,
.type_tuple_anon => TypeStructAnon,
.type_union => TypeUnion,
.type_function => TypeFunction,
@@ -2634,11 +2927,90 @@ pub const Tag = enum(u8) {
any_aligned_fields: bool,
layout: std.builtin.Type.ContainerLayout,
status: UnionType.Status,
requires_comptime: UnionType.RequiresComptime,
requires_comptime: RequiresComptime,
assumed_runtime_bits: bool,
_: u21 = 0,
};
};
/// Trailing:
/// 0. type: Index for each fields_len
/// 1. name: NullTerminatedString for each fields_len
/// 2. init: Index for each fields_len // if tag is type_struct_packed_inits
pub const TypeStructPacked = struct {
decl: Module.Decl.Index,
zir_index: Zir.Inst.Index,
fields_len: u32,
namespace: Module.Namespace.OptionalIndex,
backing_int_ty: Index,
names_map: MapIndex,
};
/// At first I thought of storing the denormalized data externally, such as...
///
/// * runtime field order
/// * calculated field offsets
/// * size and alignment of the struct
///
/// ...since these can be computed based on the other data here. However,
/// this data does need to be memoized, and therefore stored in memory
/// while the compiler is running, in order to avoid O(N^2) logic in many
/// places. Since the data can be stored compactly in the InternPool
/// representation, it is better for memory usage to store denormalized data
/// here, and potentially also better for performance as well. It's also simpler
/// than coming up with some other scheme for the data.
///
/// Trailing:
/// 0. type: Index for each field in declared order
/// 1. if not is_tuple:
/// names_map: MapIndex,
/// name: NullTerminatedString // for each field in declared order
/// 2. if any_default_inits:
/// init: Index // for each field in declared order
/// 3. if has_namespace:
/// namespace: Module.Namespace.Index
/// 4. if any_aligned_fields:
/// align: Alignment // for each field in declared order
/// 5. if any_comptime_fields:
/// field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0
/// 6. if not is_extern:
/// field_index: RuntimeOrder // for each field in runtime order
/// 7. field_offset: u32 // for each field in declared order, undef until layout_resolved
pub const TypeStruct = struct {
decl: Module.Decl.Index,
zir_index: Zir.Inst.Index,
fields_len: u32,
flags: Flags,
size: u32,
pub const Flags = packed struct(u32) {
is_extern: bool,
known_non_opv: bool,
requires_comptime: RequiresComptime,
is_tuple: bool,
assumed_runtime_bits: bool,
has_namespace: bool,
any_comptime_fields: bool,
any_default_inits: bool,
any_aligned_fields: bool,
/// `undefined` until the layout_resolved
alignment: Alignment,
/// Dependency loop detection when resolving struct alignment.
alignment_wip: bool,
/// Dependency loop detection when resolving field types.
field_types_wip: bool,
/// Dependency loop detection when resolving struct layout.
layout_wip: bool,
/// Determines whether `size`, `alignment`, runtime field order, and
/// field offsets are populated.
layout_resolved: bool,
// The type and all of its fields have had their layout resolved, even through
// pointers, which `layout_resolved` alone does not ensure.
fully_resolved: bool,
_: u11 = 0,
};
};
};
/// State that is mutable during semantic analysis. This data is not used for
@@ -2764,20 +3136,26 @@ pub const SimpleValue = enum(u32) {
/// Stored as a power-of-two, with one special value to indicate none.
pub const Alignment = enum(u6) {
@"1" = 0,
@"2" = 1,
@"4" = 2,
@"8" = 3,
@"16" = 4,
@"32" = 5,
none = std.math.maxInt(u6),
_,
pub fn toByteUnitsOptional(a: Alignment) ?u64 {
return switch (a) {
.none => null,
_ => @as(u64, 1) << @intFromEnum(a),
else => @as(u64, 1) << @intFromEnum(a),
};
}
pub fn toByteUnits(a: Alignment, default: u64) u64 {
return switch (a) {
.none => default,
_ => @as(u64, 1) << @intFromEnum(a),
else => @as(u64, 1) << @intFromEnum(a),
};
}
@@ -2792,16 +3170,95 @@ pub const Alignment = enum(u6) {
return fromByteUnits(n);
}
pub fn toLog2Units(a: Alignment) u6 {
assert(a != .none);
return @intFromEnum(a);
}
/// This is just a glorified `@enumFromInt` but using it can help
/// document the intended conversion.
/// The parameter uses a u32 for convenience at the callsite.
pub fn fromLog2Units(a: u32) Alignment {
assert(a != @intFromEnum(Alignment.none));
return @enumFromInt(a);
}
pub fn order(lhs: Alignment, rhs: Alignment) std.math.Order {
assert(lhs != .none and rhs != .none);
assert(lhs != .none);
assert(rhs != .none);
return std.math.order(@intFromEnum(lhs), @intFromEnum(rhs));
}
/// Relaxed comparison. We have this as default because a lot of callsites
/// were upgraded from directly using comparison operators on byte units,
/// with the `none` value represented by zero.
/// Prefer `compareStrict` if possible.
pub fn compare(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
return std.math.compare(lhs.toRelaxedCompareUnits(), op, rhs.toRelaxedCompareUnits());
}
pub fn compareStrict(lhs: Alignment, op: std.math.CompareOperator, rhs: Alignment) bool {
assert(lhs != .none);
assert(rhs != .none);
return std.math.compare(@intFromEnum(lhs), op, @intFromEnum(rhs));
}
/// Treats `none` as zero.
/// This matches previous behavior of using `@max` directly on byte units.
/// Prefer `maxStrict` if possible.
pub fn max(lhs: Alignment, rhs: Alignment) Alignment {
if (lhs == .none) return rhs;
if (rhs == .none) return lhs;
return maxStrict(lhs, rhs);
}
pub fn maxStrict(lhs: Alignment, rhs: Alignment) Alignment {
assert(lhs != .none);
assert(rhs != .none);
return @enumFromInt(@max(@intFromEnum(lhs), @intFromEnum(rhs)));
}
/// Treats `none` as zero.
/// This matches previous behavior of using `@min` directly on byte units.
/// Prefer `minStrict` if possible.
pub fn min(lhs: Alignment, rhs: Alignment) Alignment {
if (lhs == .none) return lhs;
if (rhs == .none) return rhs;
return minStrict(lhs, rhs);
}
pub fn minStrict(lhs: Alignment, rhs: Alignment) Alignment {
assert(lhs != .none);
assert(rhs != .none);
return @enumFromInt(@min(@intFromEnum(lhs), @intFromEnum(rhs)));
}
/// Align an address forwards to this alignment.
pub fn forward(a: Alignment, addr: u64) u64 {
assert(a != .none);
const x = (@as(u64, 1) << @intFromEnum(a)) - 1;
return (addr + x) & ~x;
}
/// Align an address backwards to this alignment.
pub fn backward(a: Alignment, addr: u64) u64 {
assert(a != .none);
const x = (@as(u64, 1) << @intFromEnum(a)) - 1;
return addr & ~x;
}
/// Check if an address is aligned to this amount.
pub fn check(a: Alignment, addr: u64) bool {
assert(a != .none);
return @ctz(addr) >= @intFromEnum(a);
}
/// An array of `Alignment` objects existing within the `extra` array.
/// This type exists to provide a struct with lifetime that is
/// not invalidated when items are added to the `InternPool`.
pub const Slice = struct {
start: u32,
/// This is the number of alignment values, not the number of u32 elements.
len: u32,
pub fn get(slice: Slice, ip: *const InternPool) []Alignment {
@@ -2811,6 +3268,23 @@ pub const Alignment = enum(u6) {
return @ptrCast(bytes[0..slice.len]);
}
};
pub fn toRelaxedCompareUnits(a: Alignment) u8 {
const n: u8 = @intFromEnum(a);
assert(n <= @intFromEnum(Alignment.none));
if (n == @intFromEnum(Alignment.none)) return 0;
return n + 1;
}
const LlvmBuilderAlignment = @import("codegen/llvm/Builder.zig").Alignment;
pub fn toLlvm(this: @This()) LlvmBuilderAlignment {
return @enumFromInt(@intFromEnum(this));
}
pub fn fromLlvm(other: LlvmBuilderAlignment) @This() {
return @enumFromInt(@intFromEnum(other));
}
};
/// Used for non-sentineled arrays that have length fitting in u32, as well as
@@ -3065,9 +3539,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.limbs.deinit(gpa);
ip.string_bytes.deinit(gpa);
ip.structs_free_list.deinit(gpa);
ip.allocated_structs.deinit(gpa);
ip.decls_free_list.deinit(gpa);
ip.allocated_decls.deinit(gpa);
@@ -3149,24 +3620,43 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) },
.type_struct => {
const struct_index: Module.Struct.OptionalIndex = @enumFromInt(data);
const namespace = if (struct_index.unwrap()) |i|
ip.structPtrConst(i).namespace.toOptional()
else
.none;
return .{ .struct_type = .{
.index = struct_index,
.namespace = namespace,
} };
},
.type_struct => .{ .struct_type = if (data == 0) .{
.extra_index = 0,
.namespace = .none,
.decl = .none,
.zir_index = @as(u32, undefined),
.layout = .Auto,
.field_names = .{ .start = 0, .len = 0 },
.field_types = .{ .start = 0, .len = 0 },
.field_inits = .{ .start = 0, .len = 0 },
.field_aligns = .{ .start = 0, .len = 0 },
.runtime_order = .{ .start = 0, .len = 0 },
.comptime_bits = .{ .start = 0, .len = 0 },
.offsets = .{ .start = 0, .len = 0 },
.names_map = undefined,
} else extraStructType(ip, data) },
.type_struct_ns => .{ .struct_type = .{
.index = .none,
.extra_index = 0,
.namespace = @as(Module.Namespace.Index, @enumFromInt(data)).toOptional(),
.decl = .none,
.zir_index = @as(u32, undefined),
.layout = .Auto,
.field_names = .{ .start = 0, .len = 0 },
.field_types = .{ .start = 0, .len = 0 },
.field_inits = .{ .start = 0, .len = 0 },
.field_aligns = .{ .start = 0, .len = 0 },
.runtime_order = .{ .start = 0, .len = 0 },
.comptime_bits = .{ .start = 0, .len = 0 },
.offsets = .{ .start = 0, .len = 0 },
.names_map = undefined,
} },
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
.type_struct_packed => .{ .struct_type = extraPackedStructType(ip, data, false) },
.type_struct_packed_inits => .{ .struct_type = extraPackedStructType(ip, data, true) },
.type_union => .{ .union_type = extraUnionType(ip, data) },
.type_enum_auto => {
@@ -3441,7 +3931,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.func_decl => .{ .func = ip.extraFuncDecl(data) },
.func_coerced => .{ .func = ip.extraFuncCoerced(data) },
.only_possible_value => {
const ty = @as(Index, @enumFromInt(data));
const ty: Index = @enumFromInt(data);
const ty_item = ip.items.get(@intFromEnum(ty));
return switch (ty_item.tag) {
.type_array_big => {
@@ -3454,20 +3944,33 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.storage = .{ .elems = sentinel[0..@intFromBool(sentinel[0] != .none)] },
} };
},
.type_array_small, .type_vector => .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = &.{} },
} },
// TODO: migrate structs to properly use the InternPool rather
// than using the SegmentedList trick, then the struct type will
// have a slice of comptime values that can be used here for when
// the struct has one possible value due to all fields comptime (same
// as the tuple case below).
.type_struct, .type_struct_ns => .{ .aggregate = .{
.type_array_small,
.type_vector,
.type_struct_ns,
.type_struct_packed,
=> .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = &.{} },
} },
// There is only one possible value precisely due to the
// fact that this values slice is fully populated!
.type_struct => {
const info = extraStructType(ip, ty_item.data);
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
} };
},
.type_struct_packed_inits => {
const info = extraPackedStructType(ip, ty_item.data, true);
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @ptrCast(info.field_inits.get(ip)) },
} };
},
// There is only one possible value precisely due to the
// fact that this values slice is fully populated!
.type_struct_anon, .type_tuple_anon => {
@@ -3476,7 +3979,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len];
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @as([]const Index, @ptrCast(values)) },
.storage = .{ .elems = @ptrCast(values) },
} };
},
@@ -3490,7 +3993,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.bytes => {
const extra = ip.extraData(Bytes, data);
const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty)));
const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty));
return .{ .aggregate = .{
.ty = extra.ty,
.storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(extra.bytes)..][0..len] },
@@ -3498,8 +4001,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.aggregate => {
const extra = ip.extraDataTrail(Tag.Aggregate, data);
const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty)));
const fields = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..len]));
const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.data.ty));
const fields: []const Index = @ptrCast(ip.extra.items[extra.end..][0..len]);
return .{ .aggregate = .{
.ty = extra.data.ty,
.storage = .{ .elems = fields },
@@ -3603,6 +4106,109 @@ fn extraTypeTupleAnon(ip: *const InternPool, extra_index: u32) Key.AnonStructTyp
};
}
/// Decodes a `Tag.TypeStruct` record plus its variable-length trailing data
/// out of `ip.extra` into a `Key.StructType`. The trailing sections appear in
/// this exact order, each present only when the corresponding flag says so:
/// field types, names_map + names (absent for tuples), default inits,
/// namespace, alignments, comptime bits, runtime order, offsets.
/// Keep this ordering in sync with the encoder, `getStructType`.
fn extraStructType(ip: *const InternPool, extra_index: u32) Key.StructType {
    const s = ip.extraDataTrail(Tag.TypeStruct, extra_index);
    const fields_len = s.data.fields_len;
    // Walking cursor into `ip.extra`; each section below advances it past
    // the elements that section occupies.
    var index = s.end;
    const field_types = t: {
        const types: Index.Slice = .{ .start = index, .len = fields_len };
        index += fields_len;
        break :t types;
    };
    const names_map, const field_names: NullTerminatedString.Slice = t: {
        // Tuples store no field names: both the map and the slice are empty.
        if (s.data.flags.is_tuple) break :t .{ .none, .{ .start = 0, .len = 0 } };
        const names_map: MapIndex = @enumFromInt(ip.extra.items[index]);
        index += 1;
        const names: NullTerminatedString.Slice = .{ .start = index, .len = fields_len };
        index += fields_len;
        break :t .{ names_map.toOptional(), names };
    };
    const field_inits: Index.Slice = t: {
        if (!s.data.flags.any_default_inits) break :t .{ .start = 0, .len = 0 };
        const inits: Index.Slice = .{ .start = index, .len = fields_len };
        index += fields_len;
        break :t inits;
    };
    const namespace = t: {
        if (!s.data.flags.has_namespace) break :t .none;
        const namespace: Module.Namespace.Index = @enumFromInt(ip.extra.items[index]);
        index += 1;
        break :t namespace.toOptional();
    };
    const field_aligns: Alignment.Slice = t: {
        if (!s.data.flags.any_aligned_fields) break :t .{ .start = 0, .len = 0 };
        const aligns: Alignment.Slice = .{ .start = index, .len = fields_len };
        // Alignments are stored packed, four per u32 element (rounded up).
        index += (fields_len + 3) / 4;
        break :t aligns;
    };
    const comptime_bits: Key.StructType.ComptimeBits = t: {
        if (!s.data.flags.any_comptime_fields) break :t .{ .start = 0, .len = 0 };
        const comptime_bits: Key.StructType.ComptimeBits = .{ .start = index, .len = fields_len };
        // One bit per field, 32 fields per u32 element (rounded up).
        index += (fields_len + 31) / 32;
        break :t comptime_bits;
    };
    const runtime_order: Key.StructType.RuntimeOrder.Slice = t: {
        // Extern structs never reorder fields, so no runtime order is stored.
        if (s.data.flags.is_extern) break :t .{ .start = 0, .len = 0 };
        const ro: Key.StructType.RuntimeOrder.Slice = .{ .start = index, .len = fields_len };
        index += fields_len;
        break :t ro;
    };
    const offsets = t: {
        const offsets: Key.StructType.Offsets = .{ .start = index, .len = fields_len };
        index += fields_len;
        break :t offsets;
    };
    return .{
        .extra_index = extra_index,
        .decl = s.data.decl.toOptional(),
        .zir_index = s.data.zir_index,
        .layout = if (s.data.flags.is_extern) .Extern else .Auto,
        .field_types = field_types,
        .names_map = names_map,
        .field_names = field_names,
        .field_inits = field_inits,
        .namespace = namespace,
        .field_aligns = field_aligns,
        .comptime_bits = comptime_bits,
        .runtime_order = runtime_order,
        .offsets = offsets,
    };
}
/// Decodes a `Tag.TypeStructPacked` record into a `Key.StructType`.
/// The trailing data is: field types, then field names, then — only when
/// `inits` is set — field default inits, each `fields_len` elements long.
/// Packed structs carry no per-field alignments, comptime bits, runtime
/// order, or offsets, so those slices are empty.
fn extraPackedStructType(ip: *const InternPool, extra_index: u32, inits: bool) Key.StructType {
    const packed_struct = ip.extraDataTrail(Tag.TypeStructPacked, extra_index);
    const fields_len = packed_struct.data.fields_len;
    const types_start = packed_struct.end;
    const names_start = types_start + fields_len;
    return .{
        .extra_index = extra_index,
        .decl = packed_struct.data.decl.toOptional(),
        .namespace = packed_struct.data.namespace,
        .zir_index = packed_struct.data.zir_index,
        .layout = .Packed,
        .names_map = packed_struct.data.names_map.toOptional(),
        .field_types = .{ .start = types_start, .len = fields_len },
        .field_names = .{ .start = names_start, .len = fields_len },
        .field_inits = if (inits) .{
            .start = names_start + fields_len,
            .len = fields_len,
        } else .{
            .start = 0,
            .len = 0,
        },
        .field_aligns = .{ .start = 0, .len = 0 },
        .runtime_order = .{ .start = 0, .len = 0 },
        .comptime_bits = .{ .start = 0, .len = 0 },
        .offsets = .{ .start = 0, .len = 0 },
    };
}
fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType {
const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index);
var index: usize = type_function.end;
@@ -3831,8 +4437,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.error_set_type => |error_set_type| {
assert(error_set_type.names_map == .none);
assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan));
const names_map = try ip.addMap(gpa);
try addStringsToMap(ip, gpa, names_map, error_set_type.names.get(ip));
const names = error_set_type.names.get(ip);
const names_map = try ip.addMap(gpa, names.len);
addStringsToMap(ip, names_map, names);
const names_len = error_set_type.names.len;
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len);
ip.items.appendAssumeCapacity(.{
@@ -3877,21 +4484,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
},
.struct_type => |struct_type| {
ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{
.tag = .type_struct,
.data = @intFromEnum(i),
} else if (struct_type.namespace.unwrap()) |i| .{
.tag = .type_struct_ns,
.data = @intFromEnum(i),
} else .{
.tag = .type_struct,
.data = @intFromEnum(Module.Struct.OptionalIndex.none),
});
},
.struct_type => unreachable, // use getStructType() instead
.anon_struct_type => unreachable, // use getAnonStructType() instead
.union_type => unreachable, // use getUnionType() instead
.opaque_type => |opaque_type| {
@@ -3994,7 +4588,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
},
.struct_type => |struct_type| {
assert(ptr.addr == .field);
assert(base_index.index < ip.structPtrUnwrapConst(struct_type.index).?.fields.count());
assert(base_index.index < struct_type.field_types.len);
},
.union_type => |union_key| {
const union_type = ip.loadUnionType(union_key);
@@ -4388,12 +4982,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
assert(ip.typeOf(elem) == child);
}
},
.struct_type => |struct_type| {
for (
aggregate.storage.values(),
ip.structPtrUnwrapConst(struct_type.index).?.fields.values(),
) |elem, field| {
assert(ip.typeOf(elem) == field.ty.toIntern());
.struct_type => |t| {
for (aggregate.storage.values(), t.field_types.get(ip)) |elem, field_ty| {
assert(ip.typeOf(elem) == field_ty);
}
},
.anon_struct_type => |anon_struct_type| {
@@ -4635,6 +5226,138 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, ini: UnionTypeInit) Allocat
return @enumFromInt(ip.items.len - 1);
}
/// Input parameters for `getStructType`.
pub const StructTypeInit = struct {
    decl: Module.Decl.Index,
    namespace: Module.Namespace.OptionalIndex,
    layout: std.builtin.Type.ContainerLayout,
    zir_index: Zir.Inst.Index,
    /// Number of fields; determines the length of every trailing per-field array.
    fields_len: u32,
    known_non_opv: bool,
    requires_comptime: RequiresComptime,
    is_tuple: bool,
    /// Whether any field is a comptime field; gates the comptime-bits trailer.
    any_comptime_fields: bool,
    /// Whether any field has a default init; gates the inits trailer.
    any_default_inits: bool,
    /// Whether any field has an explicit alignment; gates the aligns trailer.
    any_aligned_fields: bool,
};
/// Interns a struct type described by `ini`, reserving space in `ip.extra`
/// for all of its trailing per-field data. Packed structs are encoded as
/// `Tag.TypeStructPacked` (optionally with inits); auto/extern structs as
/// `Tag.TypeStruct`. Only the decl participates in hashing/equality, so an
/// existing entry for the same decl is returned as-is.
pub fn getStructType(
    ip: *InternPool,
    gpa: Allocator,
    ini: StructTypeInit,
) Allocator.Error!Index {
    const adapter: KeyAdapter = .{ .intern_pool = ip };
    const key: Key = .{
        .struct_type = .{
            // Only the decl matters for hashing and equality purposes.
            .decl = ini.decl.toOptional(),

            .extra_index = undefined,
            .namespace = undefined,
            .zir_index = undefined,
            .layout = undefined,
            .field_names = undefined,
            .field_types = undefined,
            .field_inits = undefined,
            .field_aligns = undefined,
            .runtime_order = undefined,
            .comptime_bits = undefined,
            .offsets = undefined,
            .names_map = undefined,
        },
    };
    const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
    if (gop.found_existing) return @enumFromInt(gop.index);
    errdefer _ = ip.map.pop();

    // Pre-sized to `fields_len` so later name insertions cannot fail.
    const names_map = try ip.addMap(gpa, ini.fields_len);
    errdefer _ = ip.maps.pop();

    const is_extern = switch (ini.layout) {
        .Auto => false,
        .Extern => true,
        .Packed => {
            // Packed layout: trailing data is types, names, and (only when
            // some field has one) default inits; no aligns/offsets/etc.
            try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len +
                ini.fields_len + // types
                ini.fields_len + // names
                ini.fields_len); // inits
            try ip.items.append(gpa, .{
                .tag = if (ini.any_default_inits) .type_struct_packed_inits else .type_struct_packed,
                .data = ip.addExtraAssumeCapacity(Tag.TypeStructPacked{
                    .decl = ini.decl,
                    .zir_index = ini.zir_index,
                    .fields_len = ini.fields_len,
                    .namespace = ini.namespace,
                    .backing_int_ty = .none,
                    .names_map = names_map,
                }),
            });
            ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
            ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len);
            if (ini.any_default_inits) {
                ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
            }
            return @enumFromInt(ip.items.len - 1);
        },
    };

    // Alignments pack four per u32 element; comptime flags pack 32 per u32.
    const align_elements_len = if (ini.any_aligned_fields) (ini.fields_len + 3) / 4 else 0;
    const align_element: u32 = @bitCast([1]u8{@intFromEnum(Alignment.none)} ** 4);
    const comptime_elements_len = if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0;

    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStruct).Struct.fields.len +
        (ini.fields_len * 5) + // types, names, inits, runtime order, offsets
        align_elements_len + comptime_elements_len +
        2); // names_map + namespace
    try ip.items.append(gpa, .{
        .tag = .type_struct,
        .data = ip.addExtraAssumeCapacity(Tag.TypeStruct{
            .decl = ini.decl,
            .zir_index = ini.zir_index,
            .fields_len = ini.fields_len,
            // Filled in later; maxInt(u32) until then.
            .size = std.math.maxInt(u32),
            .flags = .{
                .is_extern = is_extern,
                .known_non_opv = ini.known_non_opv,
                .requires_comptime = ini.requires_comptime,
                .is_tuple = ini.is_tuple,
                .assumed_runtime_bits = false,
                .has_namespace = ini.namespace != .none,
                .any_comptime_fields = ini.any_comptime_fields,
                .any_default_inits = ini.any_default_inits,
                .any_aligned_fields = ini.any_aligned_fields,
                .alignment = .none,
                .alignment_wip = false,
                .field_types_wip = false,
                .layout_wip = false,
                .layout_resolved = false,
                .fully_resolved = false,
            },
        }),
    });

    // The trailing data appended below must mirror the decoding order in
    // `extraStructType` exactly.
    // Field types, `none` placeholders until resolved:
    ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
    // Names map + name placeholders; tuples store no names:
    if (!ini.is_tuple) {
        ip.extra.appendAssumeCapacity(@intFromEnum(names_map));
        ip.extra.appendNTimesAssumeCapacity(@intFromEnum(OptionalNullTerminatedString.none), ini.fields_len);
    }
    if (ini.any_default_inits) {
        ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), ini.fields_len);
    }
    if (ini.namespace.unwrap()) |namespace| {
        ip.extra.appendAssumeCapacity(@intFromEnum(namespace));
    }
    if (ini.any_aligned_fields) {
        ip.extra.appendNTimesAssumeCapacity(align_element, align_elements_len);
    }
    if (ini.any_comptime_fields) {
        ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len);
    }
    // Runtime order is tracked only for auto layout.
    if (ini.layout == .Auto) {
        ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Key.StructType.RuntimeOrder.unresolved), ini.fields_len);
    }
    // Field offsets, maxInt(u32) placeholders until layout is resolved.
    ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len);
    return @enumFromInt(ip.items.len - 1);
}
pub const AnonStructTypeInit = struct {
types: []const Index,
/// This may be empty, indicating this is a tuple.
@@ -4997,10 +5720,11 @@ pub fn getErrorSetType(
});
errdefer ip.items.len -= 1;
const names_map = try ip.addMap(gpa);
const names_map = try ip.addMap(gpa, names.len);
assert(names_map == predicted_names_map);
errdefer _ = ip.maps.pop();
try addStringsToMap(ip, gpa, names_map, names);
addStringsToMap(ip, names_map, names);
return @enumFromInt(ip.items.len - 1);
}
@@ -5299,19 +6023,9 @@ pub const IncompleteEnumType = struct {
pub fn addFieldName(
self: @This(),
ip: *InternPool,
gpa: Allocator,
name: NullTerminatedString,
) Allocator.Error!?u32 {
const map = &ip.maps.items[@intFromEnum(self.names_map)];
const field_index = map.count();
const strings = ip.extra.items[self.names_start..][0..field_index];
const adapter: NullTerminatedString.Adapter = .{
.strings = @as([]const NullTerminatedString, @ptrCast(strings)),
};
const gop = try map.getOrPutAdapted(gpa, name, adapter);
if (gop.found_existing) return @intCast(gop.index);
ip.extra.items[self.names_start + field_index] = @intFromEnum(name);
return null;
) ?u32 {
return ip.addFieldName(self.names_map, self.names_start, name);
}
/// Returns the already-existing field with the same value, if any.
@@ -5319,17 +6033,14 @@ pub const IncompleteEnumType = struct {
pub fn addFieldValue(
self: @This(),
ip: *InternPool,
gpa: Allocator,
value: Index,
) Allocator.Error!?u32 {
) ?u32 {
assert(ip.typeOf(value) == @as(Index, @enumFromInt(ip.extra.items[self.tag_ty_index])));
const map = &ip.maps.items[@intFromEnum(self.values_map.unwrap().?)];
const field_index = map.count();
const indexes = ip.extra.items[self.values_start..][0..field_index];
const adapter: Index.Adapter = .{
.indexes = @as([]const Index, @ptrCast(indexes)),
};
const gop = try map.getOrPutAdapted(gpa, value, adapter);
const adapter: Index.Adapter = .{ .indexes = @ptrCast(indexes) };
const gop = map.getOrPutAssumeCapacityAdapted(value, adapter);
if (gop.found_existing) return @intCast(gop.index);
ip.extra.items[self.values_start + field_index] = @intFromEnum(value);
return null;
@@ -5370,7 +6081,7 @@ fn getIncompleteEnumAuto(
const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
assert(!gop.found_existing);
const names_map = try ip.addMap(gpa);
const names_map = try ip.addMap(gpa, enum_type.fields_len);
const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len;
try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len);
@@ -5390,7 +6101,7 @@ fn getIncompleteEnumAuto(
});
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), enum_type.fields_len);
return .{
.index = @as(Index, @enumFromInt(ip.items.len - 1)),
.index = @enumFromInt(ip.items.len - 1),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?,
.names_map = names_map,
.names_start = extra_index + extra_fields_len,
@@ -5412,9 +6123,9 @@ fn getIncompleteEnumExplicit(
const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
assert(!gop.found_existing);
const names_map = try ip.addMap(gpa);
const names_map = try ip.addMap(gpa, enum_type.fields_len);
const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: {
const values_map = try ip.addMap(gpa);
const values_map = try ip.addMap(gpa, enum_type.fields_len);
break :m values_map.toOptional();
};
@@ -5441,7 +6152,7 @@ fn getIncompleteEnumExplicit(
// This is both fields and values (if present).
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(Index.none), reserved_len);
return .{
.index = @as(Index, @enumFromInt(ip.items.len - 1)),
.index = @enumFromInt(ip.items.len - 1),
.tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
.names_map = names_map,
.names_start = extra_index + extra_fields_len,
@@ -5484,8 +6195,8 @@ pub fn getEnum(ip: *InternPool, gpa: Allocator, ini: GetEnumInit) Allocator.Erro
switch (ini.tag_mode) {
.auto => {
const names_map = try ip.addMap(gpa);
try addStringsToMap(ip, gpa, names_map, ini.names);
const names_map = try ip.addMap(gpa, ini.names.len);
addStringsToMap(ip, names_map, ini.names);
const fields_len: u32 = @intCast(ini.names.len);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
@@ -5514,12 +6225,12 @@ pub fn finishGetEnum(
ini: GetEnumInit,
tag: Tag,
) Allocator.Error!Index {
const names_map = try ip.addMap(gpa);
try addStringsToMap(ip, gpa, names_map, ini.names);
const names_map = try ip.addMap(gpa, ini.names.len);
addStringsToMap(ip, names_map, ini.names);
const values_map: OptionalMapIndex = if (ini.values.len == 0) .none else m: {
const values_map = try ip.addMap(gpa);
try addIndexesToMap(ip, gpa, values_map, ini.values);
const values_map = try ip.addMap(gpa, ini.values.len);
addIndexesToMap(ip, values_map, ini.values);
break :m values_map.toOptional();
};
const fields_len: u32 = @intCast(ini.names.len);
@@ -5553,35 +6264,35 @@ pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
fn addStringsToMap(
ip: *InternPool,
gpa: Allocator,
map_index: MapIndex,
strings: []const NullTerminatedString,
) Allocator.Error!void {
) void {
const map = &ip.maps.items[@intFromEnum(map_index)];
const adapter: NullTerminatedString.Adapter = .{ .strings = strings };
for (strings) |string| {
const gop = try map.getOrPutAdapted(gpa, string, adapter);
const gop = map.getOrPutAssumeCapacityAdapted(string, adapter);
assert(!gop.found_existing);
}
}
fn addIndexesToMap(
ip: *InternPool,
gpa: Allocator,
map_index: MapIndex,
indexes: []const Index,
) Allocator.Error!void {
) void {
const map = &ip.maps.items[@intFromEnum(map_index)];
const adapter: Index.Adapter = .{ .indexes = indexes };
for (indexes) |index| {
const gop = try map.getOrPutAdapted(gpa, index, adapter);
const gop = map.getOrPutAssumeCapacityAdapted(index, adapter);
assert(!gop.found_existing);
}
}
fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex {
fn addMap(ip: *InternPool, gpa: Allocator, cap: usize) Allocator.Error!MapIndex {
const ptr = try ip.maps.addOne(gpa);
errdefer _ = ip.maps.pop();
ptr.* = .{};
try ptr.ensureTotalCapacity(gpa, cap);
return @enumFromInt(ip.maps.items.len - 1);
}
@@ -5632,8 +6343,9 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
Tag.TypePointer.Flags,
Tag.TypeFunction.Flags,
Tag.TypePointer.PackedOffset,
Tag.Variable.Flags,
Tag.TypeUnion.Flags,
Tag.TypeStruct.Flags,
Tag.Variable.Flags,
=> @bitCast(@field(extra, field.name)),
else => @compileError("bad field type: " ++ @typeName(field.type)),
@@ -5705,6 +6417,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
Tag.TypeFunction.Flags,
Tag.TypePointer.PackedOffset,
Tag.TypeUnion.Flags,
Tag.TypeStruct.Flags,
Tag.Variable.Flags,
FuncAnalysis,
=> @bitCast(int32),
@@ -6093,8 +6806,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
const new_elem_ty = switch (ip.indexToKey(new_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i],
.struct_type => |struct_type| ip.structPtr(struct_type.index.unwrap().?)
.fields.values()[i].ty.toIntern(),
.struct_type => |struct_type| struct_type.field_types.get(ip)[i],
else => unreachable,
};
elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty);
@@ -6206,25 +6918,6 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
} });
}
/// If `val` refers to a `type_struct` item, returns its `Module.Struct.Index`;
/// otherwise returns `.none`. Asserts `val` is a real index.
pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex {
    assert(val != .none);
    const i: usize = @intFromEnum(val);
    if (ip.items.items(.tag)[i] != .type_struct) return .none;
    const struct_index: Module.Struct.Index = @enumFromInt(ip.items.items(.data)[i]);
    return struct_index.toOptional();
}
/// If `val` refers to a `type_union` item, returns its `Module.Union.Index`;
/// otherwise returns `.none`. Asserts `val` is a real index.
pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex {
    assert(val != .none);
    const i: usize = @intFromEnum(val);
    if (ip.items.items(.tag)[i] != .type_union) return .none;
    const union_index: Module.Union.Index = @enumFromInt(ip.items.items(.data)[i]);
    return union_index.toOptional();
}
pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
assert(val != .none);
const tags = ip.items.items(.tag);
@@ -6337,20 +7030,16 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
const items_size = (1 + 4) * ip.items.len;
const extra_size = 4 * ip.extra.items.len;
const limbs_size = 8 * ip.limbs.items.len;
// TODO: fields size is not taken into account
const structs_size = ip.allocated_structs.len *
(@sizeOf(Module.Struct) + @sizeOf(Module.Namespace));
const decls_size = ip.allocated_decls.len * @sizeOf(Module.Decl);
// TODO: map overhead size is not taken into account
const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + structs_size + decls_size;
const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size;
std.debug.print(
\\InternPool size: {d} bytes
\\ {d} items: {d} bytes
\\ {d} extra: {d} bytes
\\ {d} limbs: {d} bytes
\\ {d} structs: {d} bytes
\\ {d} decls: {d} bytes
\\
, .{
@@ -6361,8 +7050,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
extra_size,
ip.limbs.items.len,
limbs_size,
ip.allocated_structs.len,
structs_size,
ip.allocated_decls.len,
decls_size,
});
@@ -6399,17 +7086,40 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.type_enum_auto => @sizeOf(EnumAuto),
.type_opaque => @sizeOf(Key.OpaqueType),
.type_struct => b: {
const struct_index = @as(Module.Struct.Index, @enumFromInt(data));
const struct_obj = ip.structPtrConst(struct_index);
break :b @sizeOf(Module.Struct) +
@sizeOf(Module.Namespace) +
(struct_obj.fields.count() * @sizeOf(Module.Struct.Field));
const info = ip.extraData(Tag.TypeStruct, data);
var ints: usize = @typeInfo(Tag.TypeStruct).Struct.fields.len;
ints += info.fields_len; // types
if (!info.flags.is_tuple) {
ints += 1; // names_map
ints += info.fields_len; // names
}
if (info.flags.any_default_inits)
ints += info.fields_len; // inits
ints += @intFromBool(info.flags.has_namespace); // namespace
if (info.flags.any_aligned_fields)
ints += (info.fields_len + 3) / 4; // aligns
if (info.flags.any_comptime_fields)
ints += (info.fields_len + 31) / 32; // comptime bits
if (!info.flags.is_extern)
ints += info.fields_len; // runtime order
ints += info.fields_len; // offsets
break :b @sizeOf(u32) * ints;
},
.type_struct_ns => @sizeOf(Module.Namespace),
.type_struct_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
},
.type_struct_packed => b: {
const info = ip.extraData(Tag.TypeStructPacked, data);
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
info.fields_len + info.fields_len);
},
.type_struct_packed_inits => b: {
const info = ip.extraData(Tag.TypeStructPacked, data);
break :b @sizeOf(u32) * (@typeInfo(Tag.TypeStructPacked).Struct.fields.len +
info.fields_len + info.fields_len + info.fields_len);
},
.type_tuple_anon => b: {
const info = ip.extraData(TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len);
@@ -6562,6 +7272,8 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
.type_union,
.type_function,
@@ -6677,18 +7389,6 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
try bw.flush();
}
/// Returns a mutable pointer to the `Module.Struct` stored at `index`.
pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct {
    const i: usize = @intFromEnum(index);
    return ip.allocated_structs.at(i);
}
/// Returns a const pointer to the `Module.Struct` stored at `index`.
pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct {
    const i: usize = @intFromEnum(index);
    return ip.allocated_structs.at(i);
}
/// Like `structPtrConst`, but maps `.none` to null instead of asserting.
pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct {
    const unwrapped = index.unwrap() orelse return null;
    return structPtrConst(ip, unwrapped);
}
/// Returns a mutable pointer to the `Module.Decl` stored at `index`.
pub fn declPtr(ip: *InternPool, index: Module.Decl.Index) *Module.Decl {
    const i: usize = @intFromEnum(index);
    return ip.allocated_decls.at(i);
}
@@ -6701,28 +7401,6 @@ pub fn namespacePtr(ip: *InternPool, index: Module.Namespace.Index) *Module.Name
return ip.allocated_namespaces.at(@intFromEnum(index));
}
/// Allocates storage for a `Module.Struct`, preferring to recycle a slot
/// from the free list before growing `allocated_structs`.
pub fn createStruct(
    ip: *InternPool,
    gpa: Allocator,
    initialization: Module.Struct,
) Allocator.Error!Module.Struct.Index {
    // Reuse a previously destroyed slot when one is available.
    if (ip.structs_free_list.popOrNull()) |recycled| {
        ip.allocated_structs.at(@intFromEnum(recycled)).* = initialization;
        return recycled;
    }
    // Otherwise grow the backing list by one element.
    const slot = try ip.allocated_structs.addOne(gpa);
    slot.* = initialization;
    return @enumFromInt(ip.allocated_structs.len - 1);
}
/// Invalidates the struct at `index` and records its slot for reuse by
/// `createStruct`. Never fails; see the note about allocation failure below.
pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void {
    ip.structPtr(index).* = undefined;
    ip.structs_free_list.append(gpa, index) catch {
        // In order to keep `destroyStruct` a non-fallible function, we ignore memory
        // allocation failures here, instead leaking the Struct until garbage collection.
    };
}
pub fn createDecl(
ip: *InternPool,
gpa: Allocator,
@@ -6967,6 +7645,8 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
.type_union,
.type_function,
@@ -7056,7 +7736,7 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
.struct_type => |struct_type| struct_type.field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len,
.vector_type => |vector_type| vector_type.len,
@@ -7066,7 +7746,7 @@ pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
.struct_type => |struct_type| struct_type.field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len + @intFromBool(array_type.sentinel != .none),
.vector_type => |vector_type| vector_type.len,
@@ -7301,6 +7981,8 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
=> .Struct,
@@ -7526,6 +8208,40 @@ pub fn resolveBuiltinType(ip: *InternPool, want_index: Index, resolved_index: In
.data = @intFromEnum(SimpleValue.@"unreachable"),
});
} else {
// TODO: add the index to a free-list for reuse
// Here we could add the index to a free-list for reuse, but since
// there is so little garbage created this way it's not worth it.
}
}
/// Asserts `i` is an anonymous struct type; returns its field types.
pub fn anonStructFieldTypes(ip: *const InternPool, i: Index) []const Index {
    const key = ip.indexToKey(i);
    return key.anon_struct_type.types;
}
/// Asserts `i` is an anonymous struct type; returns its field count.
pub fn anonStructFieldsLen(ip: *const InternPool, i: Index) u32 {
    const types = ip.indexToKey(i).anon_struct_type.types;
    return @intCast(types.len);
}
/// Asserts the type is a struct; returns the Decl that owns it.
pub fn structDecl(ip: *const InternPool, i: Index) Module.Decl.OptionalIndex {
    switch (ip.indexToKey(i)) {
        .struct_type => |struct_type| return struct_type.decl,
        else => unreachable,
    }
}
/// Appends `name` as the next field name recorded at `names_start` within
/// `ip.extra`, unless it is already present. Returns the index of the
/// already-existing field with the same name, if any; null on success.
/// Assumes the map was created with sufficient capacity.
pub fn addFieldName(
    ip: *InternPool,
    names_map: MapIndex,
    names_start: u32,
    name: NullTerminatedString,
) ?u32 {
    const map = &ip.maps.items[@intFromEnum(names_map)];
    const field_index = map.count();
    // Names recorded so far live contiguously in `extra` at `names_start`.
    const recorded = ip.extra.items[names_start..][0..field_index];
    const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(recorded) };
    const gop = map.getOrPutAssumeCapacityAdapted(name, adapter);
    if (gop.found_existing) return @intCast(gop.index);
    // New name: write it into the next slot and report no duplicate.
    ip.extra.items[names_start + field_index] = @intFromEnum(name);
    return null;
}
+191 -385
View File
@@ -105,8 +105,6 @@ comptime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternP
/// To be eliminated in a future commit by moving more data into InternPool.
/// Current uses that must be eliminated:
/// * Struct comptime_args
/// * Struct optimized_order
/// * comptime pointer mutation
/// This memory lives until the Module is destroyed.
tmp_hack_arena: std.heap.ArenaAllocator,
@@ -678,14 +676,10 @@ pub const Decl = struct {
/// If the Decl owns its value and it is a struct, return it,
/// otherwise null.
pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct {
return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod));
}
pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
if (!decl.owns_tv) return .none;
if (decl.val.ip_index == .none) return .none;
return mod.intern_pool.indexToStructType(decl.val.toIntern());
pub fn getOwnedStruct(decl: Decl, mod: *Module) ?InternPool.Key.StructType {
if (!decl.owns_tv) return null;
if (decl.val.ip_index == .none) return null;
return mod.typeToStruct(decl.val.toType());
}
/// If the Decl owns its value and it is a union, return it,
@@ -795,9 +789,10 @@ pub const Decl = struct {
return decl.getExternDecl(mod) != .none;
}
pub fn getAlignment(decl: Decl, mod: *Module) u32 {
pub fn getAlignment(decl: Decl, mod: *Module) Alignment {
assert(decl.has_tv);
return @as(u32, @intCast(decl.alignment.toByteUnitsOptional() orelse decl.ty.abiAlignment(mod)));
if (decl.alignment != .none) return decl.alignment;
return decl.ty.abiAlignment(mod);
}
};
@@ -806,218 +801,6 @@ pub const EmitH = struct {
fwd_decl: ArrayListUnmanaged(u8) = .{},
};
/// A boolean property whose resolution may be pending (`unknown`) or
/// currently in progress (`wip`).
pub const PropertyBoolean = enum { no, yes, unknown, wip };
/// Represents the data that a struct declaration provides.
pub const Struct = struct {
/// Set of field names in declaration order.
fields: Fields,
/// Represents the declarations inside this struct.
namespace: Namespace.Index,
/// The Decl that corresponds to the struct itself.
owner_decl: Decl.Index,
/// Index of the struct_decl ZIR instruction.
zir_index: Zir.Inst.Index,
/// Indexes into `fields` sorted to be most memory efficient.
optimized_order: ?[*]u32 = null,
layout: std.builtin.Type.ContainerLayout,
/// If the layout is not packed, this is the noreturn type.
/// If the layout is packed, this is the backing integer type of the packed struct.
/// Whether zig chooses this type or the user specifies it, it is stored here.
/// This will be set to the noreturn type until status is `have_layout`.
backing_int_ty: Type = Type.noreturn,
status: enum {
none,
field_types_wip,
have_field_types,
layout_wip,
have_layout,
fully_resolved_wip,
// The types and all its fields have had their layout resolved. Even through pointer,
// which `have_layout` does not ensure.
fully_resolved,
},
/// If true, has more than one possible value. However it may still be non-runtime type
/// if it is a comptime-only type.
/// If false, resolving the fields is necessary to determine whether the type has only
/// one possible value.
known_non_opv: bool,
requires_comptime: PropertyBoolean = .unknown,
have_field_inits: bool = false,
is_tuple: bool,
assumed_runtime_bits: bool = false,
pub const Index = enum(u32) {
_,
pub fn toOptional(i: Index) OptionalIndex {
return @as(OptionalIndex, @enumFromInt(@intFromEnum(i)));
}
};
pub const OptionalIndex = enum(u32) {
none = std.math.maxInt(u32),
_,
pub fn init(oi: ?Index) OptionalIndex {
return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
}
pub fn unwrap(oi: OptionalIndex) ?Index {
if (oi == .none) return null;
return @as(Index, @enumFromInt(@intFromEnum(oi)));
}
};
pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
/// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
pub const Field = struct {
/// Uses `noreturn` to indicate `anytype`.
/// undefined until `status` is >= `have_field_types`.
ty: Type,
/// Uses `none` to indicate no default.
default_val: InternPool.Index,
/// Zero means to use the ABI alignment of the type.
abi_align: Alignment,
/// undefined until `status` is `have_layout`.
offset: u32,
/// If true then `default_val` is the comptime field value.
is_comptime: bool,
/// Returns the field alignment. If the struct is packed, returns 0.
/// Keep implementation in sync with `Sema.structFieldAlignment`.
pub fn alignment(
field: Field,
mod: *Module,
layout: std.builtin.Type.ContainerLayout,
) u32 {
if (field.abi_align.toByteUnitsOptional()) |abi_align| {
assert(layout != .Packed);
return @as(u32, @intCast(abi_align));
}
const target = mod.getTarget();
switch (layout) {
.Packed => return 0,
.Auto => {
if (target.ofmt == .c) {
return alignmentExtern(field, mod);
} else {
return field.ty.abiAlignment(mod);
}
},
.Extern => return alignmentExtern(field, mod),
}
}
pub fn alignmentExtern(field: Field, mod: *Module) u32 {
// This logic is duplicated in Type.abiAlignmentAdvanced.
const ty_abi_align = field.ty.abiAlignment(mod);
if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
return @max(ty_abi_align, 16);
}
return ty_abi_align;
}
};
/// Used in `optimized_order` to indicate field that is not present in the
/// runtime version of the struct.
pub const omitted_field = std.math.maxInt(u32);
pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString {
return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
}
pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
return mod.declPtr(s.owner_decl).srcLoc(mod);
}
pub fn haveFieldTypes(s: Struct) bool {
return switch (s.status) {
.none,
.field_types_wip,
=> false,
.have_field_types,
.layout_wip,
.have_layout,
.fully_resolved_wip,
.fully_resolved,
=> true,
};
}
pub fn haveLayout(s: Struct) bool {
return switch (s.status) {
.none,
.field_types_wip,
.have_field_types,
.layout_wip,
=> false,
.have_layout,
.fully_resolved_wip,
.fully_resolved,
=> true,
};
}
pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
assert(s.layout == .Packed);
assert(s.haveLayout());
var bit_sum: u64 = 0;
for (s.fields.values(), 0..) |field, i| {
if (i == index) {
return @as(u16, @intCast(bit_sum));
}
bit_sum += field.ty.bitSize(mod);
}
unreachable; // index out of bounds
}
/// Iterates over a struct's fields, yielding only those that exist at
/// runtime: comptime fields and fields whose type has no runtime bits are
/// skipped. Honors `optimized_order` when it is present.
pub const RuntimeFieldIterator = struct {
    module: *Module,
    struct_obj: *const Struct,
    // Next source-order field index to examine.
    index: u32 = 0,
    pub const FieldAndIndex = struct {
        field: Field,
        index: u32,
    };
    /// Returns the next runtime field (and its index), or null when done.
    pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex {
        const mod = it.module;
        while (true) {
            var i = it.index;
            it.index += 1;
            if (it.struct_obj.fields.count() <= i)
                return null;
            // Map through the optimized field order, if any. An
            // `omitted_field` entry marks the end of runtime fields.
            if (it.struct_obj.optimized_order) |some| {
                i = some[i];
                if (i == Module.Struct.omitted_field) return null;
            }
            const field = it.struct_obj.fields.values()[i];
            // Skip fields that have no runtime representation.
            if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) {
                return FieldAndIndex{ .index = i, .field = field };
            }
        }
    }
};
/// Convenience constructor for `RuntimeFieldIterator`.
pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
    return RuntimeFieldIterator{
        .module = module,
        .struct_obj = s,
    };
}
};
pub const DeclAdapter = struct {
mod: *Module,
@@ -2893,20 +2676,10 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
return mod.intern_pool.namespacePtr(index);
}
/// Returns a mutable pointer to the `Struct` stored in the intern pool at
/// `index`.
pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
    return mod.intern_pool.structPtr(index);
}
/// Like `namespacePtr`, but accepts an optional index and returns null when
/// the index is `.none`.
pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
    return mod.namespacePtr(index.unwrap() orelse return null);
}
/// This one accepts an index from the InternPool and asserts that it is not
/// the anonymous empty struct type. Returns null when the index is `.none`.
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct {
    const unwrapped = index.unwrap() orelse return null;
    return mod.structPtr(unwrapped);
}
/// Returns true if and only if the Decl is the top level struct associated with a File.
pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
const decl = mod.declPtr(decl_index);
@@ -3351,11 +3124,11 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
if (!decl.owns_tv) continue;
if (decl.getOwnedStruct(mod)) |struct_obj| {
struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
if (decl.getOwnedStruct(mod)) |struct_type| {
struct_type.setZirIndex(ip, inst_map.get(struct_type.zir_index) orelse {
try file.deleted_decls.append(gpa, decl_index);
continue;
};
});
}
if (decl.getOwnedUnion(mod)) |union_type| {
@@ -3870,36 +3643,16 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
const new_decl = mod.declPtr(new_decl_index);
errdefer @panic("TODO error handling");
const struct_index = try mod.createStruct(.{
.owner_decl = new_decl_index,
.fields = .{},
.zir_index = undefined, // set below
.layout = .Auto,
.status = .none,
.known_non_opv = undefined,
.is_tuple = undefined, // set below
.namespace = new_namespace_index,
});
errdefer mod.destroyStruct(struct_index);
const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
.index = struct_index.toOptional(),
.namespace = new_namespace_index.toOptional(),
} });
// TODO: figure out InternPool removals for incremental compilation
//errdefer mod.intern_pool.remove(struct_ty);
new_namespace.ty = struct_ty.toType();
file.root_decl = new_decl_index.toOptional();
new_decl.name = try file.fullyQualifiedName(mod);
new_decl.name_fully_qualified = true;
new_decl.src_line = 0;
new_decl.is_pub = true;
new_decl.is_exported = false;
new_decl.has_align = false;
new_decl.has_linksection_or_addrspace = false;
new_decl.ty = Type.type;
new_decl.val = struct_ty.toValue();
new_decl.alignment = .none;
new_decl.@"linksection" = .none;
new_decl.has_tv = true;
@@ -3907,75 +3660,76 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive.
new_decl.analysis = .in_progress;
new_decl.generation = mod.generation;
new_decl.name_fully_qualified = true;
if (file.status == .success_zir) {
assert(file.zir_loaded);
const main_struct_inst = Zir.main_struct_inst;
const struct_obj = mod.structPtr(struct_index);
struct_obj.zir_index = main_struct_inst;
const extended = file.zir.instructions.items(.data)[main_struct_inst].extended;
const small = @as(Zir.Inst.StructDecl.Small, @bitCast(extended.small));
struct_obj.is_tuple = small.is_tuple;
var sema_arena = std.heap.ArenaAllocator.init(gpa);
defer sema_arena.deinit();
const sema_arena_allocator = sema_arena.allocator();
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = sema_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| {
for (comptime_mutable_decls.items) |decl_index| {
const decl = mod.declPtr(decl_index);
_ = try decl.internValue(mod);
}
new_decl.analysis = .complete;
} else |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
}
if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
const source = file.getSource(gpa) catch |err| {
try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
const resolved_path = std.fs.path.resolve(
gpa,
if (file.pkg.root_src_directory.path) |pkg_path|
&[_][]const u8{ pkg_path, file.sub_file_path }
else
&[_][]const u8{file.sub_file_path},
) catch |err| {
try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
errdefer gpa.free(resolved_path);
mod.comp.whole_cache_manifest_mutex.lock();
defer mod.comp.whole_cache_manifest_mutex.unlock();
try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
}
} else {
if (file.status != .success_zir) {
new_decl.analysis = .file_failure;
return;
}
assert(file.zir_loaded);
var sema_arena = std.heap.ArenaAllocator.init(gpa);
defer sema_arena.deinit();
const sema_arena_allocator = sema_arena.allocator();
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = sema_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
const main_struct_inst = Zir.main_struct_inst;
const struct_ty = sema.getStructType(
new_decl_index,
new_namespace_index,
main_struct_inst,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
};
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(struct_ty);
for (comptime_mutable_decls.items) |decl_index| {
const decl = mod.declPtr(decl_index);
_ = try decl.internValue(mod);
}
new_namespace.ty = struct_ty.toType();
new_decl.val = struct_ty.toValue();
new_decl.analysis = .complete;
if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
const source = file.getSource(gpa) catch |err| {
try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
const resolved_path = std.fs.path.resolve(
gpa,
if (file.pkg.root_src_directory.path) |pkg_path|
&[_][]const u8{ pkg_path, file.sub_file_path }
else
&[_][]const u8{file.sub_file_path},
) catch |err| {
try reportRetryableFileError(mod, file, "unable to resolve path: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
errdefer gpa.free(resolved_path);
mod.comp.whole_cache_manifest_mutex.lock();
defer mod.comp.whole_cache_manifest_mutex.unlock();
try whole_cache_manifest.addFilePostContents(resolved_path, source.bytes, source.stat);
}
}
@@ -4055,18 +3809,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
};
defer sema.deinit();
if (mod.declIsRoot(decl_index)) {
const main_struct_inst = Zir.main_struct_inst;
const struct_index = decl.getOwnedStructIndex(mod).unwrap().?;
const struct_obj = mod.structPtr(struct_index);
// This might not have gotten set in `semaFile` if the first time had
// a ZIR failure, so we set it here in case.
struct_obj.zir_index = main_struct_inst;
try sema.analyzeStructDecl(decl, main_struct_inst, struct_index);
decl.analysis = .complete;
decl.generation = mod.generation;
return false;
}
assert(!mod.declIsRoot(decl_index));
var block_scope: Sema.Block = .{
.parent = null,
@@ -5241,14 +4984,6 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
return mod.intern_pool.destroyNamespace(mod.gpa, index);
}
/// Allocates a new `Struct` in the intern pool, initialized from
/// `initialization`, and returns its index.
pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
    return mod.intern_pool.createStruct(mod.gpa, initialization);
}
/// Frees a `Struct` previously allocated with `createStruct`.
pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
    return mod.intern_pool.destroyStruct(mod.gpa, index);
}
pub fn allocateNewDecl(
mod: *Module,
namespace: Namespace.Index,
@@ -6202,7 +5937,6 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!
pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
var canon_info = info;
const have_elem_layout = info.child.toType().layoutIsResolved(mod);
if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
@@ -6210,17 +5944,17 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
// type, we change it to 0 here. If this causes an assertion trip because the
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
canon_info.flags.alignment = .none;
}
if (info.flags.alignment != .none and
info.flags.alignment == info.child.toType().abiAlignment(mod))
{
canon_info.flags.alignment = .none;
}
switch (info.flags.vector_index) {
// Canonicalize host_size. If it matches the bit size of the pointee type,
// we change it to 0 here. If this causes an assertion trip, the pointee type
// needs to be resolved before calling this ptr() function.
.none => if (have_elem_layout and info.packed_offset.host_size != 0) {
.none => if (info.packed_offset.host_size != 0) {
const elem_bit_size = info.child.toType().bitSize(mod);
assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
if (info.packed_offset.host_size * 8 == elem_bit_size) {
@@ -6483,7 +6217,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
return @as(u16, @intCast(big.bitCountTwosComp()));
},
.lazy_align => |lazy_ty| {
return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @intFromBool(sign);
return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
},
.lazy_size => |lazy_ty| {
return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @intFromBool(sign);
@@ -6639,20 +6373,30 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I
/// * `@TypeOf(.{})`
/// * A struct which has no fields (`struct {}`).
/// * Not a struct.
pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
if (ty.ip_index == .none) return null;
const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null;
return mod.structPtr(struct_index);
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |t| t,
else => null,
};
}
/// Returns the struct type key when `ty` is a packed struct; null when `ty`
/// is not a struct, or is a struct with a non-packed layout.
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
    if (ty.ip_index == .none) return null;
    switch (mod.intern_pool.indexToKey(ty.ip_index)) {
        .struct_type => |t| {
            if (t.layout != .Packed) return null;
            return t;
        },
        else => return null,
    }
}
/// This asserts that the union's enum tag type has been resolved.
pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.UnionType {
if (ty.ip_index == .none) return null;
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.ip_index)) {
.union_type => |k| return ip.loadUnionType(k),
else => return null,
}
return switch (ip.indexToKey(ty.ip_index)) {
.union_type => |k| ip.loadUnionType(k),
else => null,
};
}
pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
@@ -6741,13 +6485,13 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]
pub const UnionLayout = struct {
abi_size: u64,
abi_align: u32,
abi_align: Alignment,
most_aligned_field: u32,
most_aligned_field_size: u64,
biggest_field: u32,
payload_size: u64,
payload_align: u32,
tag_align: u32,
payload_align: Alignment,
tag_align: Alignment,
tag_size: u64,
padding: u32,
};
@@ -6759,35 +6503,37 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
var most_aligned_field_size: u64 = undefined;
var biggest_field: u32 = undefined;
var payload_size: u64 = 0;
var payload_align: u32 = 0;
var payload_align: Alignment = .@"1";
for (u.field_types.get(ip), 0..) |field_ty, i| {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = u.fieldAlign(ip, @intCast(i)).toByteUnitsOptional() orelse
const explicit_align = u.fieldAlign(ip, @intCast(i));
const field_align = if (explicit_align != .none)
explicit_align
else
field_ty.toType().abiAlignment(mod);
const field_size = field_ty.toType().abiSize(mod);
if (field_size > payload_size) {
payload_size = field_size;
biggest_field = @intCast(i);
}
if (field_align > payload_align) {
payload_align = @intCast(field_align);
if (field_align.compare(.gte, payload_align)) {
payload_align = field_align;
most_aligned_field = @intCast(i);
most_aligned_field_size = field_size;
}
}
payload_align = @max(payload_align, 1);
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
if (!have_tag or !u.enum_tag_ty.toType().hasRuntimeBits(mod)) {
return .{
.abi_size = std.mem.alignForward(u64, payload_size, payload_align),
.abi_size = payload_align.forward(payload_size),
.abi_align = payload_align,
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
.biggest_field = biggest_field,
.payload_size = payload_size,
.payload_align = payload_align,
.tag_align = 0,
.tag_align = .none,
.tag_size = 0,
.padding = 0,
};
@@ -6795,29 +6541,29 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
// Put the tag before or after the payload depending on which one's
// alignment is greater.
const tag_size = u.enum_tag_ty.toType().abiSize(mod);
const tag_align = @max(1, u.enum_tag_ty.toType().abiAlignment(mod));
const tag_align = u.enum_tag_ty.toType().abiAlignment(mod).max(.@"1");
var size: u64 = 0;
var padding: u32 = undefined;
if (tag_align >= payload_align) {
if (tag_align.compare(.gte, payload_align)) {
// {Tag, Payload}
size += tag_size;
size = std.mem.alignForward(u64, size, payload_align);
size = payload_align.forward(size);
size += payload_size;
const prev_size = size;
size = std.mem.alignForward(u64, size, tag_align);
padding = @as(u32, @intCast(size - prev_size));
size = tag_align.forward(size);
padding = @intCast(size - prev_size);
} else {
// {Payload, Tag}
size += payload_size;
size = std.mem.alignForward(u64, size, tag_align);
size = tag_align.forward(size);
size += tag_size;
const prev_size = size;
size = std.mem.alignForward(u64, size, payload_align);
padding = @as(u32, @intCast(size - prev_size));
size = payload_align.forward(size);
padding = @intCast(size - prev_size);
}
return .{
.abi_size = size,
.abi_align = @max(tag_align, payload_align),
.abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
.biggest_field = biggest_field,
@@ -6834,17 +6580,16 @@ pub fn unionAbiSize(mod: *Module, u: InternPool.UnionType) u64 {
}
/// Returns 0 if the union is represented with 0 bits at runtime.
/// TODO: this returns alignment in byte units should should be a u64
pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
var max_align: u32 = 0;
var max_align: Alignment = .none;
if (have_tag) max_align = u.enum_tag_ty.toType().abiAlignment(mod);
for (u.field_types.get(ip), 0..) |field_ty, field_index| {
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
const field_align = mod.unionFieldNormalAlignment(u, @intCast(field_index));
max_align = @max(max_align, field_align);
max_align = max_align.max(field_align);
}
return max_align;
}
@@ -6852,10 +6597,10 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) u32 {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
/// TODO: this returns alignment in byte units should should be a u64
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) u32 {
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
if (u.fieldAlign(ip, field_index).toByteUnitsOptional()) |a| return @intCast(a);
const field_align = u.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
const field_ty = u.field_types.get(ip)[field_index].toType();
return field_ty.abiAlignment(mod);
}
@@ -6866,3 +6611,64 @@ pub fn unionTagFieldIndex(mod: *Module, u: InternPool.UnionType, enum_tag: Value
const enum_type = ip.indexToKey(u.enum_tag_ty).enum_type;
return enum_type.tagValueIndex(ip, enum_tag.toIntern());
}
/// Returns the field alignment of a non-packed struct field as an
/// `Alignment` (NOTE(review): the old "byte units" wording was inaccurate —
/// `Alignment` is the InternPool alignment representation, not a byte count).
/// An explicit alignment, when present, takes precedence; otherwise the
/// alignment is derived from the field type.
/// Keep implementation in sync with `Sema.structFieldAlignment`.
/// Asserts the layout is not packed.
pub fn structFieldAlignment(
    mod: *Module,
    explicit_alignment: InternPool.Alignment,
    field_ty: Type,
    layout: std.builtin.Type.ContainerLayout,
) Alignment {
    assert(layout != .Packed);
    if (explicit_alignment != .none) return explicit_alignment;
    switch (layout) {
        .Packed => unreachable, // ruled out by the assert above
        .Auto => {
            // When emitting C, auto-layout fields follow the extern (C ABI) rules.
            if (mod.getTarget().ofmt == .c) {
                return structFieldAlignmentExtern(mod, field_ty);
            } else {
                return field_ty.abiAlignment(mod);
            }
        },
        .Extern => return structFieldAlignmentExtern(mod, field_ty),
    }
}
/// Returns the field alignment of an extern struct field as an `Alignment`
/// (NOTE(review): the old "byte units" wording was inaccurate — this returns
/// the InternPool alignment representation, not a byte count).
/// This logic is duplicated in Type.abiAlignmentAdvanced; keep in sync.
pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
    const ty_abi_align = field_ty.abiAlignment(mod);
    if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
        // The C ABI requires 128 bit integer fields of structs
        // to be 16-bytes aligned.
        return ty_abi_align.max(.@"16");
    }
    return ty_abi_align;
}
/// Returns the bit offset of field `field_index` within a packed struct.
/// Asserts the struct is packed, its layout is resolved, and the index is
/// in bounds.
/// TODO: avoid linear search by storing these in trailing data of packed struct
/// types; then packedStructFieldByteOffset can be expressed in terms of
/// bits / 8, fixing that one too.
/// https://github.com/ziglang/zig/issues/17178
pub fn structPackedFieldBitOffset(
    mod: *Module,
    struct_type: InternPool.Key.StructType,
    field_index: u32,
) u16 {
    const ip = &mod.intern_pool;
    assert(struct_type.layout == .Packed);
    assert(struct_type.haveLayout(ip));
    assert(field_index < struct_type.field_types.len); // index out of bounds
    // The offset is the sum of the bit sizes of all preceding fields.
    const field_types = struct_type.field_types.get(ip);
    var offset: u64 = 0;
    for (field_types[0..field_index]) |field_ty_index| {
        offset += field_ty_index.toType().bitSize(mod);
    }
    return @intCast(offset);
}
+1034 -848
View File
@@ -2221,8 +2221,8 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS
const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{});
errdefer msg.destroy(sema.gpa);
const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg;
const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{
const struct_type = mod.typeToStruct(container_ty) orelse break :msg msg;
const default_value_src = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{
.index = field_index,
.range = .value,
});
@@ -2504,23 +2504,33 @@ fn analyzeAsAlign(
const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, .{
.needed_comptime_reason = "alignment must be comptime-known",
});
const alignment: u32 = @intCast(alignment_big); // We coerce to u29 in the prev line.
try sema.validateAlign(block, src, alignment);
return Alignment.fromNonzeroByteUnits(alignment);
return sema.validateAlign(block, src, alignment_big);
}
fn validateAlign(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
alignment: u32,
) !void {
if (alignment == 0) return sema.fail(block, src, "alignment must be >= 1", .{});
alignment: u64,
) !Alignment {
const result = try validateAlignAllowZero(sema, block, src, alignment);
if (result == .none) return sema.fail(block, src, "alignment must be >= 1", .{});
return result;
}
/// Validates `alignment` as an alignment where zero is permitted: zero maps
/// to `.none`, powers of two are converted to `Alignment`, and anything else
/// is a compile error.
fn validateAlignAllowZero(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    alignment: u64,
) !Alignment {
    if (alignment == 0) return .none;
    if (std.math.isPowerOfTwo(alignment)) {
        return Alignment.fromNonzeroByteUnits(alignment);
    }
    return sema.fail(block, src, "alignment value '{d}' is not a power of two", .{
        alignment,
    });
}
pub fn resolveAlign(
@@ -2619,7 +2629,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
defer trash_block.instructions.deinit(sema.gpa);
const operand = try trash_block.addBitCast(pointee_ty, .void_value);
const ptr_ty = try mod.ptrType(.{
const ptr_ty = try sema.ptrType(.{
.child = pointee_ty.toIntern(),
.flags = .{
.alignment = ia1.alignment,
@@ -2650,7 +2660,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
if (alignment != .none) {
try sema.resolveTypeLayout(pointee_ty);
}
const ptr_ty = try mod.ptrType(.{
const ptr_ty = try sema.ptrType(.{
.child = pointee_ty.toIntern(),
.flags = .{
.alignment = alignment,
@@ -2720,7 +2730,7 @@ fn coerceResultPtr(
}
}
const ptr_ty = try mod.ptrType(.{
const ptr_ty = try sema.ptrType(.{
.child = pointee_ty.toIntern(),
.flags = .{ .address_space = addr_space },
});
@@ -2749,7 +2759,7 @@ fn coerceResultPtr(
// Array coerced to Vector where element size is not equal but coercible.
.aggregate_init => {
const ty_pl = air_datas[trash_inst].ty_pl;
const ptr_operand_ty = try mod.ptrType(.{
const ptr_operand_ty = try sema.ptrType(.{
.child = (try sema.analyzeAsType(block, src, ty_pl.ty)).toIntern(),
.flags = .{ .address_space = addr_space },
});
@@ -2763,7 +2773,7 @@ fn coerceResultPtr(
.bitcast => {
const ty_op = air_datas[trash_inst].ty_op;
const operand_ty = sema.typeOf(ty_op.operand);
const ptr_operand_ty = try mod.ptrType(.{
const ptr_operand_ty = try sema.ptrType(.{
.child = operand_ty.toIntern(),
.flags = .{ .address_space = addr_space },
});
@@ -2801,26 +2811,26 @@ fn coerceResultPtr(
}
}
pub fn analyzeStructDecl(
pub fn getStructType(
sema: *Sema,
new_decl: *Decl,
inst: Zir.Inst.Index,
struct_index: Module.Struct.Index,
) SemaError!void {
decl: Module.Decl.Index,
namespace: Module.Namespace.Index,
zir_index: Zir.Inst.Index,
) !InternPool.Index {
const mod = sema.mod;
const struct_obj = mod.structPtr(struct_index);
const extended = sema.code.instructions.items(.data)[inst].extended;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const extended = sema.code.instructions.items(.data)[zir_index].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
struct_obj.known_non_opv = small.known_non_opv;
if (small.known_comptime_only) {
struct_obj.requires_comptime = .yes;
}
var extra_index: usize = extended.operand;
extra_index += @intFromBool(small.has_src_node);
extra_index += @intFromBool(small.has_fields_len);
const fields_len = if (small.has_fields_len) blk: {
const fields_len = sema.code.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = sema.code.extra[extra_index];
extra_index += 1;
@@ -2837,7 +2847,23 @@ pub fn analyzeStructDecl(
}
}
_ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl);
extra_index = try mod.scanNamespace(namespace, extra_index, decls_len, mod.declPtr(decl));
const ty = try ip.getStructType(gpa, .{
.decl = decl,
.namespace = namespace.toOptional(),
.zir_index = zir_index,
.layout = small.layout,
.known_non_opv = small.known_non_opv,
.is_tuple = small.is_tuple,
.fields_len = fields_len,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
.any_default_inits = small.any_default_inits,
.any_comptime_fields = small.any_comptime_fields,
.any_aligned_fields = small.any_aligned_fields,
});
return ty;
}
fn zirStructDecl(
@@ -2847,7 +2873,7 @@ fn zirStructDecl(
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ip = &mod.intern_pool;
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const src: LazySrcLoc = if (small.has_src_node) blk: {
const node_offset: i32 = @bitCast(sema.code.extra[extended.operand]);
@@ -2874,37 +2900,21 @@ fn zirStructDecl(
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const struct_index = try mod.createStruct(.{
.owner_decl = new_decl_index,
.fields = .{},
.zir_index = inst,
.layout = small.layout,
.status = .none,
.known_non_opv = undefined,
.is_tuple = small.is_tuple,
.namespace = new_namespace_index,
});
errdefer mod.destroyStruct(struct_index);
const struct_ty = ty: {
const ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{
.index = struct_index.toOptional(),
.namespace = new_namespace_index.toOptional(),
} });
const ty = try sema.getStructType(new_decl_index, new_namespace_index, inst);
if (sema.builtin_type_target_index != .none) {
mod.intern_pool.resolveBuiltinType(sema.builtin_type_target_index, ty);
ip.resolveBuiltinType(sema.builtin_type_target_index, ty);
break :ty sema.builtin_type_target_index;
}
break :ty ty;
};
// TODO: figure out InternPool removals for incremental compilation
//errdefer mod.intern_pool.remove(struct_ty);
//errdefer ip.remove(struct_ty);
new_decl.ty = Type.type;
new_decl.val = struct_ty.toValue();
new_namespace.ty = struct_ty.toType();
try sema.analyzeStructDecl(new_decl, inst, struct_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
@@ -3196,7 +3206,7 @@ fn zirEnumDecl(
extra_index += 1;
const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir);
if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| {
if (incomplete_enum.addFieldName(&mod.intern_pool, field_name)) |other_index| {
const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
const msg = msg: {
@@ -3227,7 +3237,7 @@ fn zirEnumDecl(
};
if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty);
if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| {
if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
const value_src = mod.fieldSrcLoc(new_decl_index, .{
.index = field_i,
.range = .value,
@@ -3249,7 +3259,7 @@ fn zirEnumDecl(
else
try mod.intValue(int_tag_ty, 0);
if (overflow != null) break :overflow true;
if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| {
if (incomplete_enum.addFieldValue(&mod.intern_pool, last_tag_val.?.toIntern())) |other_index| {
const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
const msg = msg: {
@@ -3498,7 +3508,7 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
}
const target = sema.mod.getTarget();
const ptr_type = try sema.mod.ptrType(.{
const ptr_type = try sema.ptrType(.{
.child = sema.fn_ret_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -3507,6 +3517,7 @@ fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
// We are inlining a function call; this should be emitted as an alloc, not a ret_ptr.
// TODO when functions gain result location support, the inlining struct in
// Block should contain the return pointer, and we would pass that through here.
try sema.queueFullTypeResolution(sema.fn_ret_ty);
return block.addTy(.alloc, ptr_type);
}
@@ -3701,7 +3712,7 @@ fn zirAllocExtended(
}
const target = sema.mod.getTarget();
try sema.resolveTypeLayout(var_ty);
const ptr_type = try sema.mod.ptrType(.{
const ptr_type = try sema.ptrType(.{
.child = var_ty.toIntern(),
.flags = .{
.alignment = alignment,
@@ -3810,7 +3821,7 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai
var ptr_info = alloc_ty.ptrInfo(mod);
ptr_info.flags.is_const = true;
const const_ptr_ty = try mod.ptrType(ptr_info);
const const_ptr_ty = try sema.ptrType(ptr_info);
// Detect if a comptime value simply needs to have its type changed.
if (try sema.resolveMaybeUndefVal(alloc)) |val| {
@@ -3852,7 +3863,7 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
return sema.analyzeComptimeAlloc(block, var_ty, .none);
}
const target = sema.mod.getTarget();
const ptr_type = try sema.mod.ptrType(.{
const ptr_type = try sema.ptrType(.{
.child = var_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -3872,7 +3883,7 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
try sema.validateVarType(block, ty_src, var_ty, false);
const target = sema.mod.getTarget();
const ptr_type = try sema.mod.ptrType(.{
const ptr_type = try sema.ptrType(.{
.child = var_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -3938,7 +3949,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const decl = mod.declPtr(decl_index);
if (iac.is_const) _ = try decl.internValue(mod);
const final_elem_ty = decl.ty;
const final_ptr_ty = try mod.ptrType(.{
const final_ptr_ty = try sema.ptrType(.{
.child = final_elem_ty.toIntern(),
.flags = .{
.is_const = false,
@@ -3971,7 +3982,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const peer_inst_list = ia2.prongs.items(.stored_inst);
const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none);
const final_ptr_ty = try mod.ptrType(.{
const final_ptr_ty = try sema.ptrType(.{
.child = final_elem_ty.toIntern(),
.flags = .{
.alignment = ia1.alignment,
@@ -4093,7 +4104,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
trash_block.is_comptime = false;
defer trash_block.instructions.deinit(gpa);
const mut_final_ptr_ty = try mod.ptrType(.{
const mut_final_ptr_ty = try sema.ptrType(.{
.child = final_elem_ty.toIntern(),
.flags = .{
.alignment = ia1.alignment,
@@ -4688,12 +4699,13 @@ fn validateStructInit(
// In this case the only thing we need to do is evaluate the implicit
// store instructions for default field values, and report any missing fields.
// Avoid the cost of the extra machinery for detecting a comptime struct init value.
for (found_fields, 0..) |field_ptr, i| {
for (found_fields, 0..) |field_ptr, i_usize| {
const i: u32 = @intCast(i_usize);
if (field_ptr != 0) continue;
const default_val = struct_ty.structFieldDefaultValue(i, mod);
if (default_val.toIntern() == .unreachable_value) {
if (struct_ty.isTuple(mod)) {
const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
const template = "missing tuple field with index {d}";
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, .{i});
@@ -4701,8 +4713,7 @@ fn validateStructInit(
root_msg = try sema.errMsg(block, init_src, template, .{i});
}
continue;
}
const field_name = struct_ty.structFieldName(i, mod);
};
const template = "missing struct field: {}";
const args = .{field_name.fmt(ip)};
if (root_msg) |msg| {
@@ -4723,10 +4734,11 @@ fn validateStructInit(
}
if (root_msg) |msg| {
if (mod.typeToStruct(struct_ty)) |struct_obj| {
const fqn = try struct_obj.getFullyQualifiedName(mod);
if (mod.typeToStruct(struct_ty)) |struct_type| {
const decl = mod.declPtr(struct_type.decl.unwrap().?);
const fqn = try decl.getFullyQualifiedName(mod);
try mod.errNoteNonLazy(
struct_obj.srcLoc(mod),
decl.srcLoc(mod),
msg,
"struct '{}' declared here",
.{fqn.fmt(ip)},
@@ -4751,7 +4763,8 @@ fn validateStructInit(
// ends up being comptime-known.
const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod));
field: for (found_fields, 0..) |field_ptr, i| {
field: for (found_fields, 0..) |field_ptr, i_usize| {
const i: u32 = @intCast(i_usize);
if (field_ptr != 0) {
// Determine whether the value stored to this pointer is comptime-known.
const field_ty = struct_ty.structFieldType(i, mod);
@@ -4830,7 +4843,7 @@ fn validateStructInit(
const default_val = struct_ty.structFieldDefaultValue(i, mod);
if (default_val.toIntern() == .unreachable_value) {
if (struct_ty.isTuple(mod)) {
const field_name = struct_ty.structFieldName(i, mod).unwrap() orelse {
const template = "missing tuple field with index {d}";
if (root_msg) |msg| {
try sema.errNote(block, init_src, msg, template, .{i});
@@ -4838,8 +4851,7 @@ fn validateStructInit(
root_msg = try sema.errMsg(block, init_src, template, .{i});
}
continue;
}
const field_name = struct_ty.structFieldName(i, mod);
};
const template = "missing struct field: {}";
const args = .{field_name.fmt(ip)};
if (root_msg) |msg| {
@@ -4853,10 +4865,11 @@ fn validateStructInit(
}
if (root_msg) |msg| {
if (mod.typeToStruct(struct_ty)) |struct_obj| {
const fqn = try struct_obj.getFullyQualifiedName(mod);
if (mod.typeToStruct(struct_ty)) |struct_type| {
const decl = mod.declPtr(struct_type.decl.unwrap().?);
const fqn = try decl.getFullyQualifiedName(mod);
try mod.errNoteNonLazy(
struct_obj.srcLoc(mod),
decl.srcLoc(mod),
msg,
"struct '{}' declared here",
.{fqn.fmt(ip)},
@@ -5255,14 +5268,14 @@ fn failWithBadMemberAccess(
fn failWithBadStructFieldAccess(
sema: *Sema,
block: *Block,
struct_obj: *Module.Struct,
struct_type: InternPool.Key.StructType,
field_src: LazySrcLoc,
field_name: InternPool.NullTerminatedString,
) CompileError {
const mod = sema.mod;
const gpa = sema.gpa;
const fqn = try struct_obj.getFullyQualifiedName(mod);
const decl = mod.declPtr(struct_type.decl.unwrap().?);
const fqn = try decl.getFullyQualifiedName(mod);
const msg = msg: {
const msg = try sema.errMsg(
@@ -5272,7 +5285,7 @@ fn failWithBadStructFieldAccess(
.{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) },
);
errdefer msg.destroy(gpa);
try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{});
try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "struct declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
@@ -5787,9 +5800,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
try mod.semaFile(result.file);
const file_root_decl_index = result.file.root_decl.unwrap().?;
const file_root_decl = mod.declPtr(file_root_decl_index);
try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index);
return Air.internedToRef(file_root_decl.val.toIntern());
return sema.analyzeDeclVal(parent_block, src, file_root_decl_index);
}
fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -8638,7 +8649,7 @@ fn analyzeOptionalPayloadPtr(
}
const child_type = opt_type.optionalChild(mod);
const child_pointer = try mod.ptrType(.{
const child_pointer = try sema.ptrType(.{
.child = child_type.toIntern(),
.flags = .{
.is_const = optional_ptr_ty.isConstPtr(mod),
@@ -8707,7 +8718,7 @@ fn zirOptionalPayload(
// TODO https://github.com/ziglang/zig/issues/6597
if (true) break :t operand_ty;
const ptr_info = operand_ty.ptrInfo(mod);
break :t try mod.ptrType(.{
break :t try sema.ptrType(.{
.child = ptr_info.child,
.flags = .{
.alignment = ptr_info.flags.alignment,
@@ -8825,7 +8836,7 @@ fn analyzeErrUnionPayloadPtr(
const err_union_ty = operand_ty.childType(mod);
const payload_ty = err_union_ty.errorUnionPayload(mod);
const operand_pointer_ty = try mod.ptrType(.{
const operand_pointer_ty = try sema.ptrType(.{
.child = payload_ty.toIntern(),
.flags = .{
.is_const = operand_ty.isConstPtr(mod),
@@ -10680,7 +10691,7 @@ const SwitchProngAnalysis = struct {
const union_obj = mod.typeToUnion(operand_ty).?;
const field_ty = union_obj.field_types.get(ip)[field_index].toType();
if (capture_byref) {
const ptr_field_ty = try mod.ptrType(.{
const ptr_field_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !operand_ptr_ty.ptrIsMutable(mod),
@@ -10786,7 +10797,7 @@ const SwitchProngAnalysis = struct {
// By-reference captures have some further restrictions which make them easier to emit
if (capture_byref) {
const operand_ptr_info = operand_ptr_ty.ptrInfo(mod);
const capture_ptr_ty = try mod.ptrType(.{
const capture_ptr_ty = try sema.ptrType(.{
.child = capture_ty.toIntern(),
.flags = .{
// TODO: alignment!
@@ -10800,7 +10811,7 @@ const SwitchProngAnalysis = struct {
// pointer type is in-memory coercible to the capture pointer type.
if (!same_types) {
for (field_tys, 0..) |field_ty, i| {
const field_ptr_ty = try mod.ptrType(.{
const field_ptr_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
// TODO: alignment!
@@ -12953,9 +12964,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false;
assert(struct_obj.haveFieldTypes());
break :hf struct_obj.fields.contains(field_name);
break :hf struct_type.nameIndex(ip, field_name) != null;
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
@@ -13025,9 +13034,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
};
try mod.semaFile(result.file);
const file_root_decl_index = result.file.root_decl.unwrap().?;
const file_root_decl = mod.declPtr(file_root_decl_index);
try mod.declareDeclDependency(sema.owner_decl_index, file_root_decl_index);
return Air.internedToRef(file_root_decl.val.toIntern());
return sema.analyzeDeclVal(block, operand_src, file_root_decl_index);
}
fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -13766,12 +13773,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.requireRuntimeBlock(block, src, runtime_src);
if (ptr_addrspace) |ptr_as| {
const alloc_ty = try mod.ptrType(.{
const alloc_ty = try sema.ptrType(.{
.child = result_ty.toIntern(),
.flags = .{ .address_space = ptr_as },
});
const alloc = try block.addTy(.alloc, alloc_ty);
const elem_ptr_ty = try mod.ptrType(.{
const elem_ptr_ty = try sema.ptrType(.{
.child = resolved_elem_ty.toIntern(),
.flags = .{ .address_space = ptr_as },
});
@@ -14031,12 +14038,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.requireRuntimeBlock(block, src, lhs_src);
if (ptr_addrspace) |ptr_as| {
const alloc_ty = try mod.ptrType(.{
const alloc_ty = try sema.ptrType(.{
.child = result_ty.toIntern(),
.flags = .{ .address_space = ptr_as },
});
const alloc = try block.addTy(.alloc, alloc_ty);
const elem_ptr_ty = try mod.ptrType(.{
const elem_ptr_ty = try sema.ptrType(.{
.child = lhs_info.elem_type.toIntern(),
.flags = .{ .address_space = ptr_as },
});
@@ -15978,7 +15985,7 @@ fn analyzePtrArithmetic(
));
assert(new_align != .none);
break :t try mod.ptrType(.{
break :t try sema.ptrType(.{
.child = ptr_info.child,
.sentinel = ptr_info.sentinel,
.flags = .{
@@ -16881,7 +16888,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.none, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.ty = (try sema.ptrType(.{
.child = param_info_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -16907,7 +16914,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// calling_convention: CallingConvention,
(try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
// alignment: comptime_int,
(try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(),
(try mod.intValue(Type.comptime_int, ty.abiAlignment(mod).toByteUnits(0))).toIntern(),
// is_generic: bool,
Value.makeBool(func_ty_info.is_generic).toIntern(),
// is_var_args: bool,
@@ -17200,7 +17207,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
// Build our ?[]const Error value
const slice_errors_ty = try mod.ptrType(.{
const slice_errors_ty = try sema.ptrType(.{
.child = error_field_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -17349,7 +17356,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.none, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.ty = (try sema.ptrType(.{
.child = enum_field_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -17461,7 +17468,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const alignment = switch (layout) {
.Auto, .Extern => try sema.unionFieldAlignment(union_obj, @intCast(i)),
.Packed => 0,
.Packed => .none,
};
const field_ty = union_obj.field_types.get(ip)[i];
@@ -17471,7 +17478,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// type: type,
field_ty,
// alignment: comptime_int,
(try mod.intValue(Type.comptime_int, alignment)).toIntern(),
(try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = union_field_ty.toIntern(),
@@ -17493,7 +17500,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.none, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.ty = (try sema.ptrType(.{
.child = union_field_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -17578,7 +17585,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};
try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
const layout = ty.containerLayout(mod);
var struct_field_vals: []InternPool.Index = &.{};
defer gpa.free(struct_field_vals);
@@ -17633,7 +17639,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime).toIntern(),
// alignment: comptime_int,
(try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(),
(try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod).toByteUnits(0))).toIntern(),
};
struct_field_val.* = try mod.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -17645,16 +17651,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.struct_type => |s| s,
else => unreachable,
};
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv;
struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count());
struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
for (
struct_field_vals,
struct_obj.fields.keys(),
struct_obj.fields.values(),
) |*field_val, name_nts, field| {
for (struct_field_vals, 0..) |*field_val, i| {
// TODO: write something like getCoercedInts to avoid needing to dupe
const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts));
const name = if (struct_type.fieldName(ip, i).unwrap()) |name_nts|
try sema.arena.dupe(u8, ip.stringToSlice(name_nts))
else
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
const field_ty = struct_type.field_types.get(ip)[i].toType();
const field_init = struct_type.fieldInit(ip, i);
const field_is_comptime = struct_type.fieldIsComptime(ip, i);
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
@@ -17677,24 +17684,28 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
const opt_default_val = if (field.default_val == .none)
null
else
field.default_val.toValue();
const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val);
const alignment = field.alignment(mod, layout);
const opt_default_val = if (field_init == .none) null else field_init.toValue();
const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val);
const alignment = switch (struct_type.layout) {
.Packed => .none,
else => try sema.structFieldAlignment(
struct_type.fieldAlign(ip, i),
field_ty,
struct_type.layout,
),
};
const struct_field_fields = .{
// name: []const u8,
name_val,
// type: type,
field.ty.toIntern(),
field_ty.toIntern(),
// default_value: ?*const anyopaque,
default_val_ptr.toIntern(),
// is_comptime: bool,
Value.makeBool(field.is_comptime).toIntern(),
Value.makeBool(field_is_comptime).toIntern(),
// alignment: comptime_int,
(try mod.intValue(Type.comptime_int, alignment)).toIntern(),
(try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -17717,7 +17728,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.none, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.ty = (try sema.ptrType(.{
.child = struct_field_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -17733,11 +17744,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const backing_integer_val = try mod.intern(.{ .opt = .{
.ty = (try mod.optionalType(.type_type)).toIntern(),
.val = if (layout == .Packed) val: {
const struct_obj = mod.typeToStruct(ty).?;
assert(struct_obj.haveLayout());
assert(struct_obj.backing_int_ty.isInt(mod));
break :val struct_obj.backing_int_ty.toIntern();
.val = if (mod.typeToPackedStruct(ty)) |packed_struct| val: {
assert(packed_struct.backingIntType(ip).toType().isInt(mod));
break :val packed_struct.backingIntType(ip).*;
} else .none,
} });
@@ -17754,6 +17763,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
break :t decl.val.toType();
};
const layout = ty.containerLayout(mod);
const field_values = [_]InternPool.Index{
// layout: ContainerLayout,
(try mod.enumValueFieldIndex(container_layout_ty, @intFromEnum(layout))).toIntern(),
@@ -17863,7 +17874,7 @@ fn typeInfoDecls(
.none, // default alignment
);
return try mod.intern(.{ .ptr = .{
.ty = (try mod.ptrType(.{
.ty = (try sema.ptrType(.{
.child = declaration_ty.toIntern(),
.flags = .{
.size = .Slice,
@@ -18433,7 +18444,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
const operand_ty = sema.typeOf(operand);
const ptr_info = operand_ty.ptrInfo(mod);
const res_ty = try mod.ptrType(.{
const res_ty = try sema.ptrType(.{
.child = err_union_ty.errorUnionPayload(mod).toIntern(),
.flags = .{
.is_const = ptr_info.flags.is_const,
@@ -18924,9 +18935,8 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
},
else => {},
}
const abi_align: u32 = @intCast((try val.getUnsignedIntAdvanced(mod, sema)).?);
try sema.validateAlign(block, align_src, abi_align);
break :blk Alignment.fromByteUnits(abi_align);
const align_bytes = (try val.getUnsignedIntAdvanced(mod, sema)).?;
break :blk try sema.validateAlignAllowZero(block, align_src, align_bytes);
} else .none;
const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: {
@@ -18988,7 +18998,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
}
const ty = try mod.ptrType(.{
const ty = try sema.ptrType(.{
.child = elem_ty.toIntern(),
.sentinel = sentinel,
.flags = .{
@@ -19226,7 +19236,7 @@ fn zirStructInit(
if (is_ref) {
const target = mod.getTarget();
const alloc_ty = try mod.ptrType(.{
const alloc_ty = try sema.ptrType(.{
.child = resolved_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19291,12 +19301,12 @@ fn finishStructInit(
}
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
for (struct_obj.fields.values(), 0..) |field, i| {
for (0..struct_type.field_types.len) |i| {
if (field_inits[i] != .none) continue;
if (field.default_val == .none) {
const field_name = struct_obj.fields.keys()[i];
const field_init = struct_type.fieldInit(ip, i);
if (field_init == .none) {
const field_name = struct_type.field_names.get(ip)[i];
const template = "missing struct field: {}";
const args = .{field_name.fmt(ip)};
if (root_msg) |msg| {
@@ -19305,7 +19315,7 @@ fn finishStructInit(
root_msg = try sema.errMsg(block, init_src, template, args);
}
} else {
field_inits[i] = Air.internedToRef(field.default_val);
field_inits[i] = Air.internedToRef(field_init);
}
}
},
@@ -19313,10 +19323,11 @@ fn finishStructInit(
}
if (root_msg) |msg| {
if (mod.typeToStruct(struct_ty)) |struct_obj| {
const fqn = try struct_obj.getFullyQualifiedName(mod);
if (mod.typeToStruct(struct_ty)) |struct_type| {
const decl = mod.declPtr(struct_type.decl.unwrap().?);
const fqn = try decl.getFullyQualifiedName(mod);
try mod.errNoteNonLazy(
struct_obj.srcLoc(mod),
decl.srcLoc(mod),
msg,
"struct '{}' declared here",
.{fqn.fmt(ip)},
@@ -19349,7 +19360,7 @@ fn finishStructInit(
if (is_ref) {
try sema.resolveStructLayout(struct_ty);
const target = sema.mod.getTarget();
const alloc_ty = try mod.ptrType(.{
const alloc_ty = try sema.ptrType(.{
.child = struct_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19502,7 +19513,7 @@ fn structInitAnon(
if (is_ref) {
const target = mod.getTarget();
const alloc_ty = try mod.ptrType(.{
const alloc_ty = try sema.ptrType(.{
.child = tuple_ty,
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19516,7 +19527,7 @@ fn structInitAnon(
};
extra_index = item.end;
const field_ptr_ty = try mod.ptrType(.{
const field_ptr_ty = try sema.ptrType(.{
.child = field_ty,
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19640,7 +19651,7 @@ fn zirArrayInit(
if (is_ref) {
const target = mod.getTarget();
const alloc_ty = try mod.ptrType(.{
const alloc_ty = try sema.ptrType(.{
.child = array_ty.toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19648,7 +19659,7 @@ fn zirArrayInit(
if (array_ty.isTuple(mod)) {
for (resolved_args, 0..) |arg, i| {
const elem_ptr_ty = try mod.ptrType(.{
const elem_ptr_ty = try sema.ptrType(.{
.child = array_ty.structFieldType(i, mod).toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19661,7 +19672,7 @@ fn zirArrayInit(
return sema.makePtrConst(block, alloc);
}
const elem_ptr_ty = try mod.ptrType(.{
const elem_ptr_ty = try sema.ptrType(.{
.child = array_ty.elemType2(mod).toIntern(),
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19749,14 +19760,14 @@ fn arrayInitAnon(
if (is_ref) {
const target = sema.mod.getTarget();
const alloc_ty = try mod.ptrType(.{
const alloc_ty = try sema.ptrType(.{
.child = tuple_ty,
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
const alloc = try block.addTy(.alloc, alloc_ty);
for (operands, 0..) |operand, i_usize| {
const i: u32 = @intCast(i_usize);
const field_ptr_ty = try mod.ptrType(.{
const field_ptr_ty = try sema.ptrType(.{
.child = types[i],
.flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) },
});
@@ -19848,10 +19859,10 @@ fn fieldType(
return Air.internedToRef(anon_struct.types.get(ip)[field_index]);
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const field = struct_obj.fields.get(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
return Air.internedToRef(field.ty.toIntern());
const field_index = struct_type.nameIndex(ip, field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
const field_ty = struct_type.field_types.get(ip)[field_index];
return Air.internedToRef(field_ty);
},
else => unreachable,
},
@@ -20167,14 +20178,14 @@ fn zirReify(
.AnyFrame => return sema.failWithUseOfAsync(block, src),
.EnumLiteral => return .enum_literal_type,
.Int => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const signedness_val = try union_val.val.toValue().fieldValue(
mod,
fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "signedness")).?,
);
const bits_val = try union_val.val.toValue().fieldValue(
mod,
fields.getIndex(try ip.getOrPutString(gpa, "bits")).?,
struct_type.nameIndex(ip, try ip.getOrPutString(gpa, "bits")).?,
);
const signedness = mod.toEnum(std.builtin.Signedness, signedness_val);
@@ -20183,11 +20194,13 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Vector => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const len_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
).?);
const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
@@ -20203,8 +20216,9 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Float => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const bits_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "bits"),
).?);
@@ -20220,29 +20234,37 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Pointer => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const size_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "size"),
).?);
const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const is_const_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_const"),
).?);
const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const is_volatile_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_volatile"),
).?);
const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const address_space_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "address_space"),
).?);
const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_allowzero"),
).?);
const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "sentinel"),
).?);
@@ -20307,7 +20329,7 @@ fn zirReify(
}
}
const ty = try mod.ptrType(.{
const ty = try sema.ptrType(.{
.child = elem_ty.toIntern(),
.sentinel = actual_sentinel,
.flags = .{
@@ -20322,14 +20344,17 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Array => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const len_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "len"),
).?);
const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const sentinel_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "sentinel"),
).?);
@@ -20348,8 +20373,9 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Optional => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const child_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "child"),
).?);
@@ -20359,11 +20385,13 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.ErrorUnion => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const error_set_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "error_set"),
).?);
const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const payload_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "payload"),
).?);
@@ -20386,8 +20414,9 @@ fn zirReify(
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = try payload_val.elemValue(mod, i);
const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
).?);
@@ -20405,20 +20434,25 @@ fn zirReify(
return Air.internedToRef(ty.toIntern());
},
.Struct => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
).?);
const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const backing_integer_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "backing_integer"),
).?);
const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields"),
).?);
const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const is_tuple_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_tuple"),
).?);
@@ -20436,17 +20470,21 @@ fn zirReify(
return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool());
},
.Enum => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type"),
).?);
const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields"),
).?);
const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_exhaustive"),
).?);
@@ -20496,11 +20534,13 @@ fn zirReify(
for (0..fields_len) |field_i| {
const elem_val = try fields_val.elemValue(mod, field_i);
const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
).?);
const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "value"),
).?);
@@ -20515,7 +20555,7 @@ fn zirReify(
});
}
if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| {
if (incomplete_enum.addFieldName(ip, field_name)) |other_index| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{
field_name.fmt(ip),
@@ -20528,7 +20568,7 @@ fn zirReify(
return sema.failWithOwnedErrorMsg(block, msg);
}
if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
if (incomplete_enum.addFieldValue(ip, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
const msg = msg: {
const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
errdefer msg.destroy(gpa);
@@ -20545,8 +20585,9 @@ fn zirReify(
return decl_val;
},
.Opaque => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
@@ -20594,17 +20635,21 @@ fn zirReify(
return decl_val;
},
.Union => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const layout_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "layout"),
).?);
const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const tag_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "tag_type"),
).?);
const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const fields_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "fields"),
).?);
const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const decls_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "decls"),
).?);
@@ -20644,14 +20689,17 @@ fn zirReify(
for (0..fields_len) |i| {
const elem_val = try fields_val.elemValue(mod, i);
const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
).?);
const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "type"),
).?);
const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
@@ -20812,23 +20860,29 @@ fn zirReify(
return decl_val;
},
.Fn => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const struct_type = ip.indexToKey(ip.typeOf(union_val.val)).struct_type;
const calling_convention_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "calling_convention"),
).?);
const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const alignment_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const is_generic_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_generic"),
).?);
const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const is_var_args_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_var_args"),
).?);
const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const return_type_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "return_type"),
).?);
const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
const params_val = try union_val.val.toValue().fieldValue(mod, struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "params"),
).?);
@@ -20844,15 +20898,9 @@ fn zirReify(
}
const alignment = alignment: {
if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
const alignment: u29 = @intCast(alignment_val.toUnsignedInt(mod));
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :alignment .none;
} else {
break :alignment Alignment.fromByteUnits(alignment);
}
const alignment = try sema.validateAlignAllowZero(block, src, alignment_val.toUnsignedInt(mod));
const default = target_util.defaultFunctionAlignment(target);
break :alignment if (alignment == default) .none else alignment;
};
const return_type = return_type_val.optionalValue(mod) orelse
return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
@@ -20863,14 +20911,17 @@ fn zirReify(
var noalias_bits: u32 = 0;
for (param_types, 0..) |*param_type, i| {
const elem_val = try params_val.elemValue(mod, i);
const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const param_is_generic_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_generic"),
).?);
const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const param_is_noalias_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_noalias"),
).?);
const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const opt_param_type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "type"),
).?);
@@ -20931,6 +20982,8 @@ fn reifyStruct(
.Auto => {},
};
const fields_len: u32 = @intCast(try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
// Because these three things each reference each other, `undefined`
// placeholders are used before being set after the struct type gains an
// InternPool index.
@@ -20946,58 +20999,52 @@ fn reifyStruct(
mod.abortAnonDecl(new_decl_index);
}
const new_namespace_index = try mod.createNamespace(.{
.parent = block.namespace.toOptional(),
.ty = undefined,
.file_scope = block.getFileScope(mod),
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const struct_index = try mod.createStruct(.{
.owner_decl = new_decl_index,
.fields = .{},
const ty = try ip.getStructType(gpa, .{
.decl = new_decl_index,
.namespace = .none,
.zir_index = inst,
.layout = layout,
.status = .have_field_types,
.known_non_opv = false,
.fields_len = fields_len,
.requires_comptime = .unknown,
.is_tuple = is_tuple,
.namespace = new_namespace_index,
// So that we don't have to scan ahead, we allocate space in the struct
// type for alignments, comptime fields, and default inits. This might
// result in wasted space, however, this is a permitted encoding of
// struct types.
.any_comptime_fields = true,
.any_default_inits = true,
.any_aligned_fields = true,
});
const struct_obj = mod.structPtr(struct_index);
errdefer mod.destroyStruct(struct_index);
const struct_ty = try ip.get(gpa, .{ .struct_type = .{
.index = struct_index.toOptional(),
.namespace = new_namespace_index.toOptional(),
} });
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(struct_ty);
//errdefer ip.remove(ty);
const struct_type = ip.indexToKey(ty).struct_type;
new_decl.ty = Type.type;
new_decl.val = struct_ty.toValue();
new_namespace.ty = struct_ty.toType();
new_decl.val = ty.toValue();
// Fields
const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
var i: usize = 0;
while (i < fields_len) : (i += 1) {
for (0..fields_len) |i| {
const elem_val = try fields_val.elemValue(mod, i);
const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const elem_struct_type = ip.indexToKey(ip.typeOf(elem_val.toIntern())).struct_type;
const name_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "name"),
).?);
const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const type_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "type"),
).?);
const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const default_value_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "default_value"),
).?);
const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const is_comptime_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "is_comptime"),
).?);
const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
const alignment_val = try elem_val.fieldValue(mod, elem_struct_type.nameIndex(
ip,
try ip.getOrPutString(gpa, "alignment"),
).?);
@@ -21009,6 +21056,8 @@ fn reifyStruct(
if (layout == .Packed) {
if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (is_comptime_val.toBool()) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{});
} else {
struct_type.field_aligns.get(ip)[i] = Alignment.fromByteUnits(abi_align);
}
if (layout == .Extern and is_comptime_val.toBool()) {
return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
@@ -21032,10 +21081,8 @@ fn reifyStruct(
.{field_index},
);
}
}
const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
// TODO: better source location
} else if (struct_type.addFieldName(ip, field_name)) |prev_index| {
_ = prev_index; // TODO: better source location
return sema.fail(block, src, "duplicate struct field {}", .{field_name.fmt(ip)});
}
@@ -21051,13 +21098,10 @@ fn reifyStruct(
return sema.fail(block, src, "comptime field without default initialization value", .{});
}
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = Alignment.fromByteUnits(abi_align),
.default_val = default_val,
.is_comptime = is_comptime_val.toBool(),
.offset = undefined,
};
struct_type.field_types.get(ip)[i] = field_ty.toIntern();
struct_type.field_inits.get(ip)[i] = default_val;
if (is_comptime_val.toBool())
struct_type.setFieldComptime(ip, i);
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
@@ -21079,7 +21123,7 @@ fn reifyStruct(
};
return sema.failWithOwnedErrorMsg(block, msg);
}
if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
if (layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
@@ -21091,7 +21135,7 @@ fn reifyStruct(
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
} else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
} else if (layout == .Packed and !(validatePackedType(field_ty, mod))) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
errdefer msg.destroy(gpa);
@@ -21107,13 +21151,12 @@ fn reifyStruct(
}
if (layout == .Packed) {
struct_obj.status = .layout_wip;
for (struct_obj.fields.values(), 0..) |field, index| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
for (0..struct_type.field_types.len) |index| {
const field_ty = struct_type.field_types.get(ip)[index].toType();
sema.resolveTypeLayout(field_ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{});
try sema.addFieldErrNote(ty.toType(), index, msg, "while checking this field", .{});
return err;
},
else => return err,
@@ -21121,19 +21164,18 @@ fn reifyStruct(
}
var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
fields_bit_sum += field.ty.bitSize(mod);
for (struct_type.field_types.get(ip)) |field_ty| {
fields_bit_sum += field_ty.toType().bitSize(mod);
}
if (backing_int_val.optionalValue(mod)) |payload| {
const backing_int_ty = payload.toType();
if (backing_int_val.optionalValue(mod)) |backing_int_ty_val| {
const backing_int_ty = backing_int_ty_val.toType();
try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = backing_int_ty;
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
} else {
struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
}
struct_obj.status = .have_layout;
}
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
@@ -21439,8 +21481,9 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
}
if (ptr_align > 1) {
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, ptr_align - 1)).toIntern());
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
@@ -21458,8 +21501,9 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const is_non_zero = try block.addBinOp(.cmp_neq, elem_coerced, .zero_usize);
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
}
if (ptr_align > 1) {
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, ptr_align - 1)).toIntern());
if (ptr_align.compare(.gt, .@"1")) {
const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
@@ -21476,12 +21520,19 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
return block.addAggregateInit(dest_ty, new_elems);
}
fn ptrFromIntVal(sema: *Sema, block: *Block, operand_src: LazySrcLoc, operand_val: Value, ptr_ty: Type, ptr_align: u32) !Value {
fn ptrFromIntVal(
sema: *Sema,
block: *Block,
operand_src: LazySrcLoc,
operand_val: Value,
ptr_ty: Type,
ptr_align: Alignment,
) !Value {
const mod = sema.mod;
const addr = operand_val.toUnsignedInt(mod);
if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
if (addr != 0 and ptr_align != .none and !ptr_align.check(addr))
return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
return switch (ptr_ty.zigTypeTag(mod)) {
@@ -21795,18 +21846,26 @@ fn ptrCastFull(
// TODO: vector index?
}
const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse src_info.child.toType().abiAlignment(mod);
const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse dest_info.child.toType().abiAlignment(mod);
const src_align = if (src_info.flags.alignment != .none)
src_info.flags.alignment
else
src_info.child.toType().abiAlignment(mod);
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
dest_info.child.toType().abiAlignment(mod);
if (!flags.align_cast) {
if (dest_align > src_align) {
if (dest_align.compare(.gt, src_align)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
operand_ty.fmt(mod), src_align,
operand_ty.fmt(mod), src_align.toByteUnits(0),
});
try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{
dest_ty.fmt(mod), dest_align,
dest_ty.fmt(mod), dest_align.toByteUnits(0),
});
try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{});
break :msg msg;
@@ -21874,7 +21933,7 @@ fn ptrCastFull(
// Only convert to a many-pointer at first
var info = dest_info;
info.flags.size = .Many;
const ty = try mod.ptrType(info);
const ty = try sema.ptrType(info);
if (dest_ty.zigTypeTag(mod) == .Optional) {
break :blk try mod.optionalType(ty.toIntern());
} else {
@@ -21891,10 +21950,13 @@ fn ptrCastFull(
if (!dest_ty.ptrAllowsZero(mod) and ptr_val.isNull(mod)) {
return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
}
if (dest_align > src_align) {
if (dest_align.compare(.gt, src_align)) {
if (try ptr_val.getUnsignedIntAdvanced(mod, null)) |addr| {
if (addr % dest_align != 0) {
return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align });
if (!dest_align.check(addr)) {
return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
addr,
dest_align.toByteUnitsOptional().?,
});
}
}
}
@@ -21928,8 +21990,12 @@ fn ptrCastFull(
try sema.addSafetyCheck(block, src, ok, .cast_to_null);
}
if (block.wantSafety() and dest_align > src_align and try sema.typeHasRuntimeBits(dest_info.child.toType())) {
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, dest_align - 1)).toIntern());
if (block.wantSafety() and
dest_align.compare(.gt, src_align) and
try sema.typeHasRuntimeBits(dest_info.child.toType()))
{
const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -21946,7 +22012,7 @@ fn ptrCastFull(
// We can't change address spaces with a bitcast, so this requires two instructions
var intermediate_info = src_info;
intermediate_info.flags.address_space = dest_info.flags.address_space;
const intermediate_ptr_ty = try mod.ptrType(intermediate_info);
const intermediate_ptr_ty = try sema.ptrType(intermediate_info);
const intermediate_ty = if (dest_ptr_ty.zigTypeTag(mod) == .Optional) blk: {
break :blk try mod.optionalType(intermediate_ptr_ty.toIntern());
} else intermediate_ptr_ty;
@@ -22002,7 +22068,7 @@ fn zirPtrCastNoDest(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
var ptr_info = operand_ty.ptrInfo(mod);
if (flags.const_cast) ptr_info.flags.is_const = false;
if (flags.volatile_cast) ptr_info.flags.is_volatile = false;
const dest_ty = try mod.ptrType(ptr_info);
const dest_ty = try sema.ptrType(ptr_info);
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
return Air.internedToRef((try mod.getCoerced(operand_val, dest_ty)).toIntern());
@@ -22285,6 +22351,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
});
const mod = sema.mod;
const ip = &mod.intern_pool;
try sema.resolveTypeLayout(ty);
switch (ty.zigTypeTag(mod)) {
.Struct => {},
@@ -22300,7 +22367,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
}
const field_index = if (ty.isTuple(mod)) blk: {
if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
if (ip.stringEqlSlice(field_name, "len")) {
return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
}
break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
@@ -22313,12 +22380,13 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
switch (ty.containerLayout(mod)) {
.Packed => {
var bit_sum: u64 = 0;
const fields = ty.structFields(mod);
for (fields.values(), 0..) |field, i| {
const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
for (0..struct_type.field_types.len) |i| {
if (i == field_index) {
return bit_sum;
}
bit_sum += field.ty.bitSize(mod);
const field_ty = struct_type.field_types.get(ip)[i].toType();
bit_sum += field_ty.bitSize(mod);
} else unreachable;
},
else => return ty.structFieldOffset(field_index, mod) * 8,
@@ -22535,7 +22603,7 @@ fn checkAtomicPtrOperand(
const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
.Pointer => ptr_ty.ptrInfo(mod),
else => {
const wanted_ptr_ty = try mod.ptrType(wanted_ptr_data);
const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
_ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
unreachable;
},
@@ -22545,7 +22613,7 @@ fn checkAtomicPtrOperand(
wanted_ptr_data.flags.is_allowzero = ptr_data.flags.is_allowzero;
wanted_ptr_data.flags.is_volatile = ptr_data.flags.is_volatile;
const wanted_ptr_ty = try mod.ptrType(wanted_ptr_data);
const wanted_ptr_ty = try sema.ptrType(wanted_ptr_data);
const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
return casted_ptr;
@@ -23717,8 +23785,8 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
} else {
ptr_ty_data.flags.alignment = blk: {
if (mod.typeToStruct(parent_ty)) |struct_obj| {
break :blk struct_obj.fields.values()[field_index].abi_align;
if (mod.typeToStruct(parent_ty)) |struct_type| {
break :blk struct_type.fieldAlign(ip, field_index);
} else if (mod.typeToUnion(parent_ty)) |union_obj| {
break :blk union_obj.fieldAlign(ip, field_index);
} else {
@@ -23727,11 +23795,11 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
};
}
const actual_field_ptr_ty = try mod.ptrType(ptr_ty_data);
const actual_field_ptr_ty = try sema.ptrType(ptr_ty_data);
const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src);
ptr_ty_data.child = parent_ty.toIntern();
const result_ptr = try mod.ptrType(ptr_ty_data);
const result_ptr = try sema.ptrType(ptr_ty_data);
if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
const field = switch (ip.indexToKey(field_ptr_val.toIntern())) {
@@ -24062,7 +24130,7 @@ fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !A
// Already an array pointer.
return ptr;
}
const new_ty = try mod.ptrType(.{
const new_ty = try sema.ptrType(.{
.child = (try mod.arrayType(.{
.len = len,
.sentinel = info.sentinel,
@@ -24266,7 +24334,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
assert(dest_manyptr_ty_key.flags.size == .One);
dest_manyptr_ty_key.child = dest_elem_ty.toIntern();
dest_manyptr_ty_key.flags.size = .Many;
break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src);
break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src);
} else new_dest_ptr;
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
@@ -24277,7 +24345,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
assert(src_manyptr_ty_key.flags.size == .One);
src_manyptr_ty_key.child = src_elem_ty.toIntern();
src_manyptr_ty_key.flags.size = .Many;
break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(src_manyptr_ty_key), new_src_ptr, src_src);
break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(src_manyptr_ty_key), new_src_ptr, src_src);
} else new_src_ptr;
// ok1: dest >= src + len
@@ -24528,13 +24596,9 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
if (val.isGenericPoison()) {
break :blk null;
}
const alignment: u32 = @intCast(val.toUnsignedInt(mod));
try sema.validateAlign(block, align_src, alignment);
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :blk .none;
} else {
break :blk Alignment.fromNonzeroByteUnits(alignment);
}
const alignment = try sema.validateAlignAllowZero(block, align_src, val.toUnsignedInt(mod));
const default = target_util.defaultFunctionAlignment(target);
break :blk if (alignment == default) .none else alignment;
} else if (extra.data.bits.has_align_ref) blk: {
const align_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
@@ -24546,13 +24610,9 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
else => |e| return e,
};
const alignment: u32 = @intCast(align_tv.val.toUnsignedInt(mod));
try sema.validateAlign(block, align_src, alignment);
if (alignment == target_util.defaultFunctionAlignment(target)) {
break :blk .none;
} else {
break :blk Alignment.fromNonzeroByteUnits(alignment);
}
const alignment = try sema.validateAlignAllowZero(block, align_src, align_tv.val.toUnsignedInt(mod));
const default = target_util.defaultFunctionAlignment(target);
break :blk if (alignment == default) .none else alignment;
} else .none;
const @"addrspace": ?std.builtin.AddressSpace = if (extra.data.bits.has_addrspace_body) blk: {
@@ -25237,16 +25297,17 @@ fn explainWhyTypeIsComptimeInner(
.Struct => {
if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return;
if (mod.typeToStruct(ty)) |struct_obj| {
for (struct_obj.fields.values(), 0..) |field, i| {
const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{
if (mod.typeToStruct(ty)) |struct_type| {
for (0..struct_type.field_types.len) |i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
const field_src_loc = mod.fieldSrcLoc(struct_type.decl.unwrap().?, .{
.index = i,
.range = .type,
});
if (try sema.typeRequiresComptime(field.ty)) {
if (try sema.typeRequiresComptime(field_ty)) {
try mod.errNoteNonLazy(field_src_loc, msg, "struct requires comptime because of this field", .{});
try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field.ty, type_set);
try sema.explainWhyTypeIsComptimeInner(msg, field_src_loc, field_ty, type_set);
}
}
}
@@ -25515,7 +25576,7 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
const stack_trace_ty = try sema.getBuiltinType("StackTrace");
try sema.resolveTypeFields(stack_trace_ty);
const target = mod.getTarget();
const ptr_stack_trace_ty = try mod.ptrType(.{
const ptr_stack_trace_ty = try sema.ptrType(.{
.child = stack_trace_ty.toIntern(),
.flags = .{
.address_space = target_util.defaultAddressSpace(target, .global_constant),
@@ -25867,7 +25928,7 @@ fn fieldVal(
return Air.internedToRef((try mod.intValue(Type.usize, inner_ty.arrayLen(mod))).toIntern());
} else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(mod);
const result_ty = try mod.ptrType(.{
const result_ty = try sema.ptrType(.{
.child = ptr_info.child.toType().childType(mod).toIntern(),
.sentinel = ptr_info.sentinel,
.flags = .{
@@ -26086,7 +26147,7 @@ fn fieldPtr(
if (ip.stringEqlSlice(field_name, "ptr")) {
const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);
const result_ty = try mod.ptrType(.{
const result_ty = try sema.ptrType(.{
.child = slice_ptr_ty.toIntern(),
.flags = .{
.is_const = !attr_ptr_ty.ptrIsMutable(mod),
@@ -26108,7 +26169,7 @@ fn fieldPtr(
return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
} else if (ip.stringEqlSlice(field_name, "len")) {
const result_ty = try mod.ptrType(.{
const result_ty = try sema.ptrType(.{
.child = .usize_type,
.flags = .{
.is_const = !attr_ptr_ty.ptrIsMutable(mod),
@@ -26297,13 +26358,12 @@ fn fieldCallBind(
switch (concrete_ty.zigTypeTag(mod)) {
.Struct => {
try sema.resolveTypeFields(concrete_ty);
if (mod.typeToStruct(concrete_ty)) |struct_obj| {
const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
if (mod.typeToStruct(concrete_ty)) |struct_type| {
const field_index = struct_type.nameIndex(ip, field_name) orelse
break :find_field;
const field_index: u32 = @intCast(field_index_usize);
const field = struct_obj.fields.values()[field_index];
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr);
return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr);
} else if (concrete_ty.isTuple(mod)) {
if (ip.stringEqlSlice(field_name, "len")) {
return .{ .direct = try mod.intRef(Type.usize, concrete_ty.structFieldCount(mod)) };
@@ -26316,7 +26376,7 @@ fn fieldCallBind(
const max = concrete_ty.structFieldCount(mod);
for (0..max) |i_usize| {
const i: u32 = @intCast(i_usize);
if (field_name == concrete_ty.structFieldName(i, mod)) {
if (field_name == concrete_ty.structFieldName(i, mod).unwrap().?) {
return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, mod), i, object_ptr);
}
}
@@ -26434,7 +26494,7 @@ fn finishFieldCallBind(
object_ptr: Air.Inst.Ref,
) CompileError!ResolvedFieldCallee {
const mod = sema.mod;
const ptr_field_ty = try mod.ptrType(.{
const ptr_field_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !ptr_ty.ptrIsMutable(mod),
@@ -26526,13 +26586,14 @@ fn structFieldPtr(
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
assert(struct_ty.zigTypeTag(mod) == .Struct);
try sema.resolveTypeFields(struct_ty);
try sema.resolveStructLayout(struct_ty);
if (struct_ty.isTuple(mod)) {
if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
if (ip.stringEqlSlice(field_name, "len")) {
const len_inst = try mod.intRef(Type.usize, struct_ty.structFieldCount(mod));
return sema.analyzeRef(block, src, len_inst);
}
@@ -26543,11 +26604,10 @@ fn structFieldPtr(
return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing);
}
const struct_obj = mod.typeToStruct(struct_ty).?;
const struct_type = mod.typeToStruct(struct_ty).?;
const field_index_big = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
const field_index: u32 = @intCast(field_index_big);
const field_index = struct_type.nameIndex(ip, field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name);
return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing);
}
@@ -26563,17 +26623,18 @@ fn structFieldPtrByIndex(
initializing: bool,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
if (struct_ty.isAnonStruct(mod)) {
return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing);
}
const struct_obj = mod.typeToStruct(struct_ty).?;
const field = struct_obj.fields.values()[field_index];
const struct_type = mod.typeToStruct(struct_ty).?;
const field_ty = struct_type.field_types.get(ip)[field_index];
const struct_ptr_ty = sema.typeOf(struct_ptr);
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
var ptr_ty_data: InternPool.Key.PtrType = .{
.child = field.ty.toIntern(),
.child = field_ty,
.flags = .{
.is_const = struct_ptr_ty_info.flags.is_const,
.is_volatile = struct_ptr_ty_info.flags.is_volatile,
@@ -26583,20 +26644,23 @@ fn structFieldPtrByIndex(
const target = mod.getTarget();
const parent_align = struct_ptr_ty_info.flags.alignment.toByteUnitsOptional() orelse
const parent_align = if (struct_ptr_ty_info.flags.alignment != .none)
struct_ptr_ty_info.flags.alignment
else
try sema.typeAbiAlignment(struct_ptr_ty_info.child.toType());
if (struct_obj.layout == .Packed) {
if (struct_type.layout == .Packed) {
comptime assert(Type.packed_struct_layout_version == 2);
var running_bits: u16 = 0;
for (struct_obj.fields.values(), 0..) |f, i| {
if (!(try sema.typeHasRuntimeBits(f.ty))) continue;
for (0..struct_type.field_types.len) |i| {
const f_ty = struct_type.field_types.get(ip)[i].toType();
if (!(try sema.typeHasRuntimeBits(f_ty))) continue;
if (i == field_index) {
ptr_ty_data.packed_offset.bit_offset = running_bits;
}
running_bits += @intCast(f.ty.bitSize(mod));
running_bits += @intCast(f_ty.bitSize(mod));
}
ptr_ty_data.packed_offset.host_size = (running_bits + 7) / 8;
@@ -26607,7 +26671,7 @@ fn structFieldPtrByIndex(
ptr_ty_data.packed_offset.bit_offset += struct_ptr_ty_info.packed_offset.bit_offset;
}
ptr_ty_data.flags.alignment = Alignment.fromByteUnits(parent_align);
ptr_ty_data.flags.alignment = parent_align;
// If the field happens to be byte-aligned, simplify the pointer type.
// The pointee type bit size must match its ABI byte size so that loads and stores
@@ -26617,38 +26681,47 @@ fn structFieldPtrByIndex(
// targets before adding the necessary complications to this code. This will not
// cause miscompilations; it only means the field pointer uses bit masking when it
// might not be strictly necessary.
if (parent_align != 0 and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
if (parent_align != .none and ptr_ty_data.packed_offset.bit_offset % 8 == 0 and
target.cpu.arch.endian() == .Little)
{
const elem_size_bytes = ptr_ty_data.child.toType().abiSize(mod);
const elem_size_bytes = try sema.typeAbiSize(ptr_ty_data.child.toType());
const elem_size_bits = ptr_ty_data.child.toType().bitSize(mod);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align));
const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?));
assert(new_align != .none);
ptr_ty_data.flags.alignment = new_align;
ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
}
}
} else if (struct_obj.layout == .Extern) {
// For extern structs, field aligment might be bigger than type's natural alignment. Eg, in
// `extern struct { x: u32, y: u16 }` the second field is aligned as u32.
} else if (struct_type.layout == .Extern) {
// For extern structs, field alignment might be bigger than type's
// natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the
// second field is aligned as u32.
const field_offset = struct_ty.structFieldOffset(field_index, mod);
ptr_ty_data.flags.alignment = Alignment.fromByteUnits(
if (parent_align == 0) 0 else std.math.gcd(field_offset, parent_align),
);
ptr_ty_data.flags.alignment = if (parent_align == .none)
.none
else
@enumFromInt(@min(@intFromEnum(parent_align), @ctz(field_offset)));
} else {
// Our alignment is capped at the field alignment
const field_align = try sema.structFieldAlignment(field, struct_obj.layout);
ptr_ty_data.flags.alignment = Alignment.fromByteUnits(@min(field_align, parent_align));
// Our alignment is capped at the field alignment.
const field_align = try sema.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty.toType(),
struct_type.layout,
);
ptr_ty_data.flags.alignment = if (struct_ptr_ty_info.flags.alignment == .none)
field_align
else
field_align.min(parent_align);
}
const ptr_field_ty = try mod.ptrType(ptr_ty_data);
const ptr_field_ty = try sema.ptrType(ptr_ty_data);
if (field.is_comptime) {
if (struct_type.fieldIsComptime(ip, field_index)) {
const val = try mod.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
.addr = .{ .comptime_field = field.default_val },
.addr = .{ .comptime_field = struct_type.field_inits.get(ip)[field_index] },
} });
return Air.internedToRef(val);
}
@@ -26678,33 +26751,34 @@ fn structFieldVal(
struct_ty: Type,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
assert(struct_ty.zigTypeTag(mod) == .Struct);
try sema.resolveTypeFields(struct_ty);
switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) {
switch (ip.indexToKey(struct_ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
if (struct_type.isTuple(ip))
return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty);
const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
const field_index: u32 = @intCast(field_index_usize);
const field = struct_obj.fields.values()[field_index];
if (field.is_comptime) {
return Air.internedToRef(field.default_val);
const field_index = struct_type.nameIndex(ip, field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_type, field_name_src, field_name);
if (struct_type.fieldIsComptime(ip, field_index)) {
return Air.internedToRef(struct_type.field_inits.get(ip)[field_index]);
}
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| {
if (struct_val.isUndef(mod)) return mod.undefRef(field.ty);
if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| {
if (struct_val.isUndef(mod)) return mod.undefRef(field_ty);
if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| {
return Air.internedToRef(opv.toIntern());
}
return Air.internedToRef((try struct_val.fieldValue(mod, field_index)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
return block.addStructFieldVal(struct_byval, field_index, field.ty);
try sema.resolveTypeLayout(field_ty);
return block.addStructFieldVal(struct_byval, field_index, field_ty);
},
.anon_struct_type => |anon_struct| {
if (anon_struct.names.len == 0) {
@@ -26792,6 +26866,7 @@ fn tupleFieldValByIndex(
}
try sema.requireRuntimeBlock(block, src, null);
try sema.resolveTypeLayout(field_ty);
return block.addStructFieldVal(tuple_byval, field_index, field_ty);
}
@@ -26816,16 +26891,19 @@ fn unionFieldPtr(
const union_obj = mod.typeToUnion(union_ty).?;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field_ty = union_obj.field_types.get(ip)[field_index].toType();
const ptr_field_ty = try mod.ptrType(.{
const ptr_field_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = union_ptr_info.flags.is_const,
.is_volatile = union_ptr_info.flags.is_volatile,
.address_space = union_ptr_info.flags.address_space,
.alignment = if (union_obj.getLayout(ip) == .Auto) blk: {
const union_align = union_ptr_info.flags.alignment.toByteUnitsOptional() orelse try sema.typeAbiAlignment(union_ty);
const union_align = if (union_ptr_info.flags.alignment != .none)
union_ptr_info.flags.alignment
else
try sema.typeAbiAlignment(union_ty);
const field_align = try sema.unionFieldAlignment(union_obj, field_index);
break :blk InternPool.Alignment.fromByteUnits(@min(union_align, field_align));
break :blk union_align.min(field_align);
} else union_ptr_info.flags.alignment,
},
.packed_offset = union_ptr_info.packed_offset,
@@ -26970,6 +27048,7 @@ fn unionFieldVal(
_ = try block.addNoOp(.unreach);
return .unreachable_value;
}
try sema.resolveTypeLayout(field_ty);
return block.addStructFieldVal(union_byval, field_index, field_ty);
}
@@ -27194,7 +27273,7 @@ fn tupleFieldPtr(
}
const field_ty = tuple_ty.structFieldType(field_index, mod);
const ptr_field_ty = try mod.ptrType(.{
const ptr_field_ty = try sema.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !tuple_ptr_ty.ptrIsMutable(mod),
@@ -27265,6 +27344,7 @@ fn tupleField(
try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src);
try sema.requireRuntimeBlock(block, tuple_src, null);
try sema.resolveTypeLayout(field_ty);
return block.addStructFieldVal(tuple, field_index, field_ty);
}
@@ -28266,7 +28346,7 @@ const InMemoryCoercionResult = union(enum) {
ptr_qualifiers: Qualifiers,
ptr_allowzero: Pair,
ptr_bit_range: BitRange,
ptr_alignment: IntPair,
ptr_alignment: AlignPair,
double_ptr_to_anyopaque: Pair,
slice_to_anyopaque: Pair,
@@ -28312,6 +28392,11 @@ const InMemoryCoercionResult = union(enum) {
wanted: u64,
};
const AlignPair = struct {
actual: Alignment,
wanted: Alignment,
};
const Size = struct {
actual: std.builtin.Type.Pointer.Size,
wanted: std.builtin.Type.Pointer.Size,
@@ -28555,8 +28640,8 @@ const InMemoryCoercionResult = union(enum) {
break;
},
.ptr_alignment => |pair| {
try sema.errNote(block, src, msg, "pointer alignment '{}' cannot cast into pointer alignment '{}'", .{
pair.actual, pair.wanted,
try sema.errNote(block, src, msg, "pointer alignment '{d}' cannot cast into pointer alignment '{d}'", .{
pair.actual.toByteUnits(0), pair.wanted.toByteUnits(0),
});
break;
},
@@ -29133,13 +29218,17 @@ fn coerceInMemoryAllowedPtrs(
if (src_info.flags.alignment != .none or dest_info.flags.alignment != .none or
dest_info.child != src_info.child)
{
const src_align = src_info.flags.alignment.toByteUnitsOptional() orelse
src_info.child.toType().abiAlignment(mod);
const src_align = if (src_info.flags.alignment != .none)
src_info.flags.alignment
else
try sema.typeAbiAlignment(src_info.child.toType());
const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse
dest_info.child.toType().abiAlignment(mod);
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
try sema.typeAbiAlignment(dest_info.child.toType());
if (dest_align > src_align) {
if (dest_align.compare(.gt, src_align)) {
return InMemoryCoercionResult{ .ptr_alignment = .{
.actual = src_align,
.wanted = dest_align,
@@ -30378,13 +30467,17 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
if (inst_info.flags.alignment == .none and dest_info.flags.alignment == .none) return true;
if (len0) return true;
const inst_align = inst_info.flags.alignment.toByteUnitsOptional() orelse
const inst_align = if (inst_info.flags.alignment != .none)
inst_info.flags.alignment
else
inst_info.child.toType().abiAlignment(mod);
const dest_align = dest_info.flags.alignment.toByteUnitsOptional() orelse
const dest_align = if (dest_info.flags.alignment != .none)
dest_info.flags.alignment
else
dest_info.child.toType().abiAlignment(mod);
if (dest_align > inst_align) {
if (dest_align.compare(.gt, inst_align)) {
in_memory_result.* = .{ .ptr_alignment = .{
.actual = inst_align,
.wanted = dest_align,
@@ -30598,7 +30691,7 @@ fn coerceAnonStructToUnion(
else
.{ .count = anon_struct_type.names.len },
.struct_type => |struct_type| name: {
const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys();
const field_names = struct_type.field_names.get(ip);
break :name if (field_names.len == 1)
.{ .name = field_names[0] }
else
@@ -30869,8 +30962,8 @@ fn coerceTupleToStruct(
return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src);
}
const fields = struct_ty.structFields(mod);
const field_vals = try sema.arena.alloc(InternPool.Index, fields.count());
const struct_type = mod.typeToStruct(struct_ty).?;
const field_vals = try sema.arena.alloc(InternPool.Index, struct_type.field_types.len);
const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
@memset(field_refs, .none);
@@ -30878,10 +30971,7 @@ fn coerceTupleToStruct(
var runtime_src: ?LazySrcLoc = null;
const field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
struct_obj.fields.count()
else
0,
.struct_type => |s| s.field_types.len,
else => unreachable,
};
for (0..field_count) |field_index_usize| {
@@ -30893,22 +30983,23 @@ fn coerceTupleToStruct(
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
.struct_type => |s| s.field_names.get(ip)[field_i],
else => unreachable,
};
const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src);
const field = fields.values()[field_index];
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
const coerced = try sema.coerce(block, field.ty, elem_ref, field_src);
const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
field_refs[field_index] = coerced;
if (field.is_comptime) {
if (struct_type.fieldIsComptime(ip, field_index)) {
const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
return sema.failWithNeededComptime(block, field_src, .{
.needed_comptime_reason = "value stored in comptime field must be comptime-known",
});
};
if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) {
const field_init = struct_type.field_inits.get(ip)[field_index].toValue();
if (!init_val.eql(field_init, field_ty, sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
@@ -30928,10 +31019,10 @@ fn coerceTupleToStruct(
for (field_refs, 0..) |*field_ref, i| {
if (field_ref.* != .none) continue;
const field_name = fields.keys()[i];
const field = fields.values()[i];
const field_name = struct_type.field_names.get(ip)[i];
const field_default_val = struct_type.fieldInit(ip, i);
const field_src = inst_src; // TODO better source location
if (field.default_val == .none) {
if (field_default_val == .none) {
const template = "missing struct field: {}";
const args = .{field_name.fmt(ip)};
if (root_msg) |msg| {
@@ -30942,9 +31033,9 @@ fn coerceTupleToStruct(
continue;
}
if (runtime_src == null) {
field_vals[i] = field.default_val;
field_vals[i] = field_default_val;
} else {
field_ref.* = Air.internedToRef(field.default_val);
field_ref.* = Air.internedToRef(field_default_val);
}
}
@@ -30980,10 +31071,7 @@ fn coerceTupleToTuple(
const ip = &mod.intern_pool;
const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
struct_obj.fields.count()
else
0,
.struct_type => |struct_type| struct_type.field_types.len,
else => unreachable,
};
const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count);
@@ -30993,10 +31081,7 @@ fn coerceTupleToTuple(
const inst_ty = sema.typeOf(inst);
const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj|
struct_obj.fields.count()
else
0,
.struct_type => |struct_type| struct_type.field_types.len,
else => unreachable,
};
if (src_field_count > dest_field_count) return error.NotCoercible;
@@ -31011,7 +31096,7 @@ fn coerceTupleToTuple(
anon_struct_type.names.get(ip)[field_i]
else
try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}),
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i],
.struct_type => |struct_type| struct_type.field_names.get(ip)[field_i],
else => unreachable,
};
@@ -31019,20 +31104,20 @@ fn coerceTupleToTuple(
return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{});
const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize].toType(),
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize],
.struct_type => |struct_type| struct_type.field_types.get(ip)[field_index_usize],
else => unreachable,
};
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize],
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val,
.struct_type => |struct_type| struct_type.fieldInit(ip, field_index_usize),
else => unreachable,
};
const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src);
const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src);
field_refs[field_index] = coerced;
if (default_val != .none) {
const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
@@ -31041,7 +31126,7 @@ fn coerceTupleToTuple(
});
};
if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) {
if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) {
return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i);
}
}
@@ -31058,18 +31143,19 @@ fn coerceTupleToTuple(
var root_msg: ?*Module.ErrorMsg = null;
errdefer if (root_msg) |msg| msg.destroy(sema.gpa);
for (field_refs, 0..) |*field_ref, i| {
for (field_refs, 0..) |*field_ref, i_usize| {
const i: u32 = @intCast(i_usize);
if (field_ref.* != .none) continue;
const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i],
.struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val,
.struct_type => |struct_type| struct_type.fieldInit(ip, i),
else => unreachable,
};
const field_src = inst_src; // TODO better source location
if (default_val == .none) {
if (tuple_ty.isTuple(mod)) {
const field_name = tuple_ty.structFieldName(i, mod).unwrap() orelse {
const template = "missing tuple field: {d}";
if (root_msg) |msg| {
try sema.errNote(block, field_src, msg, template, .{i});
@@ -31077,9 +31163,9 @@ fn coerceTupleToTuple(
root_msg = try sema.errMsg(block, field_src, template, .{i});
}
continue;
}
};
const template = "missing struct field: {}";
const args = .{tuple_ty.structFieldName(i, mod).fmt(ip)};
const args = .{field_name.fmt(ip)};
if (root_msg) |msg| {
try sema.errNote(block, field_src, msg, template, args);
} else {
@@ -31229,7 +31315,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo
const decl = mod.declPtr(decl_index);
const decl_tv = try decl.typedValue();
const ptr_ty = try mod.ptrType(.{
const ptr_ty = try sema.ptrType(.{
.child = decl_tv.ty.toIntern(),
.flags = .{
.alignment = decl.alignment,
@@ -31283,14 +31369,14 @@ fn analyzeRef(
try sema.requireRuntimeBlock(block, src, null);
const address_space = target_util.defaultAddressSpace(mod.getTarget(), .local);
const ptr_type = try mod.ptrType(.{
const ptr_type = try sema.ptrType(.{
.child = operand_ty.toIntern(),
.flags = .{
.is_const = true,
.address_space = address_space,
},
});
const mut_ptr_type = try mod.ptrType(.{
const mut_ptr_type = try sema.ptrType(.{
.child = operand_ty.toIntern(),
.flags = .{ .address_space = address_space },
});
@@ -31662,7 +31748,7 @@ fn analyzeSlice(
assert(manyptr_ty_key.flags.size == .One);
manyptr_ty_key.child = elem_ty.toIntern();
manyptr_ty_key.flags.size = .Many;
break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src);
break :ptr try sema.coerceCompatiblePtrs(block, try sema.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src);
} else ptr_or_slice;
const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
@@ -31885,7 +31971,7 @@ fn analyzeSlice(
if (opt_new_len_val) |new_len_val| {
const new_len_int = new_len_val.toUnsignedInt(mod);
const return_ty = try mod.ptrType(.{
const return_ty = try sema.ptrType(.{
.child = (try mod.arrayType(.{
.len = new_len_int,
.sentinel = if (sentinel) |s| s.toIntern() else .none,
@@ -31946,7 +32032,7 @@ fn analyzeSlice(
return sema.fail(block, src, "non-zero length slice of undefined pointer", .{});
}
const return_ty = try mod.ptrType(.{
const return_ty = try sema.ptrType(.{
.child = elem_ty.toIntern(),
.sentinel = if (sentinel) |s| s.toIntern() else .none,
.flags = .{
@@ -33181,12 +33267,17 @@ fn resolvePeerTypesInner(
}
// Note that the align can be always non-zero; Module.ptrType will canonicalize it
ptr_info.flags.alignment = Alignment.fromByteUnits(@min(
ptr_info.flags.alignment.toByteUnitsOptional() orelse
ptr_info.flags.alignment = InternPool.Alignment.min(
if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
ptr_info.child.toType().abiAlignment(mod),
peer_info.flags.alignment.toByteUnitsOptional() orelse
if (peer_info.flags.alignment != .none)
peer_info.flags.alignment
else
peer_info.child.toType().abiAlignment(mod),
));
);
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
return .{ .conflict = .{
.peer_idx_a = first_idx,
@@ -33208,7 +33299,7 @@ fn resolvePeerTypesInner(
opt_ptr_info = ptr_info;
}
return .{ .success = try mod.ptrType(opt_ptr_info.?) };
return .{ .success = try sema.ptrType(opt_ptr_info.?) };
},
.ptr => {
@@ -33260,12 +33351,17 @@ fn resolvePeerTypesInner(
} };
// Note that the align can be always non-zero; Type.ptr will canonicalize it
ptr_info.flags.alignment = Alignment.fromByteUnits(@min(
ptr_info.flags.alignment.toByteUnitsOptional() orelse
ptr_info.child.toType().abiAlignment(mod),
peer_info.flags.alignment.toByteUnitsOptional() orelse
peer_info.child.toType().abiAlignment(mod),
));
ptr_info.flags.alignment = Alignment.min(
if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
try sema.typeAbiAlignment(ptr_info.child.toType()),
if (peer_info.flags.alignment != .none)
peer_info.flags.alignment
else
try sema.typeAbiAlignment(peer_info.child.toType()),
);
if (ptr_info.flags.address_space != peer_info.flags.address_space) {
return generic_err;
@@ -33513,7 +33609,7 @@ fn resolvePeerTypesInner(
},
}
return .{ .success = try mod.ptrType(opt_ptr_info.?) };
return .{ .success = try sema.ptrType(opt_ptr_info.?) };
},
.func => {
@@ -33802,8 +33898,9 @@ fn resolvePeerTypesInner(
}
if (!is_tuple) {
for (field_names, 0..) |expected, field_idx| {
const actual = ty.structFieldName(field_idx, mod);
for (field_names, 0..) |expected, field_index_usize| {
const field_index: u32 = @intCast(field_index_usize);
const actual = ty.structFieldName(field_index, mod).unwrap().?;
if (actual == expected) continue;
return .{ .conflict = .{
.peer_idx_a = first_idx,
@@ -34190,104 +34287,246 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
}
}
fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
/// Resolve a struct's alignment only without triggering resolution of its layout.
/// Asserts that the alignment is not yet resolved and the layout is non-packed.
pub fn resolveStructAlignment(
sema: *Sema,
ty: InternPool.Index,
struct_type: InternPool.Key.StructType,
) CompileError!Alignment {
const mod = sema.mod;
try sema.resolveTypeFields(ty);
if (mod.typeToStruct(ty)) |struct_obj| {
switch (struct_obj.status) {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
struct_obj.srcLoc(mod),
"struct '{}' depends on itself",
.{ty.fmt(mod)},
);
return sema.failWithOwnedErrorMsg(null, msg);
},
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
const prev_status = struct_obj.status;
errdefer if (struct_obj.status == .layout_wip) {
struct_obj.status = prev_status;
};
const ip = &mod.intern_pool;
const target = mod.getTarget();
struct_obj.status = .layout_wip;
for (struct_obj.fields.values(), 0..) |field, i| {
sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
return err;
},
else => return err,
};
}
assert(struct_type.flagsPtr(ip).alignment == .none);
assert(struct_type.layout != .Packed);
if (struct_obj.layout == .Packed) {
try semaBackingIntType(mod, struct_obj);
}
struct_obj.status = .have_layout;
_ = try sema.typeRequiresComptime(ty);
if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
struct_obj.srcLoc(mod),
"struct layout depends on it having runtime bits",
.{},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
if (struct_obj.layout == .Auto and !struct_obj.is_tuple and
mod.backendSupportsFeature(.field_reordering))
{
const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count());
for (struct_obj.fields.values(), 0..) |field, i| {
optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
@intCast(i)
else
Module.Struct.omitted_field;
}
const AlignSortContext = struct {
struct_obj: *Module.Struct,
sema: *Sema,
fn lessThan(ctx: @This(), a: u32, b: u32) bool {
const m = ctx.sema.mod;
if (a == Module.Struct.omitted_field) return false;
if (b == Module.Struct.omitted_field) return true;
return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) >
ctx.struct_obj.fields.values()[b].ty.abiAlignment(m);
}
};
mem.sort(u32, optimized_order, AlignSortContext{
.struct_obj = struct_obj,
.sema = sema,
}, AlignSortContext.lessThan);
struct_obj.optimized_order = optimized_order.ptr;
}
if (struct_type.flagsPtr(ip).field_types_wip) {
// We'll guess "pointer-aligned", if the struct has an
// underaligned pointer field then some allocations
// might require explicit alignment.
//TODO write this bit and emit an error later if incorrect
//struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
struct_type.flagsPtr(ip).alignment = result;
return result;
}
// otherwise it's a tuple; no need to resolve anything
try sema.resolveTypeFieldsStruct(ty, struct_type);
if (struct_type.setAlignmentWip(ip)) {
// We'll guess "pointer-aligned", if the struct has an
// underaligned pointer field then some allocations
// might require explicit alignment.
//TODO write this bit and emit an error later if incorrect
//struct_type.flagsPtr(ip).assumed_pointer_aligned = true;
const result = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
struct_type.flagsPtr(ip).alignment = result;
return result;
}
defer struct_type.clearAlignmentWip(ip);
var result: Alignment = .@"1";
for (0..struct_type.field_types.len) |i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty))
continue;
const field_align = try sema.structFieldAlignment(
struct_type.fieldAlign(ip, i),
field_ty,
struct_type.layout,
);
result = result.maxStrict(field_align);
}
struct_type.flagsPtr(ip).alignment = result;
return result;
}
fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
const gpa = mod.gpa;
fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const struct_type = mod.typeToStruct(ty) orelse return;
var fields_bit_sum: u64 = 0;
for (struct_obj.fields.values()) |field| {
fields_bit_sum += field.ty.bitSize(mod);
if (struct_type.haveLayout(ip))
return;
try sema.resolveTypeFields(ty);
if (struct_type.layout == .Packed) {
try semaBackingIntType(mod, struct_type);
return;
}
const decl_index = struct_obj.owner_decl;
if (struct_type.setLayoutWip(ip)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
"struct '{}' depends on itself",
.{ty.fmt(mod)},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
defer struct_type.clearLayoutWip(ip);
const aligns = try sema.arena.alloc(Alignment, struct_type.field_types.len);
const sizes = try sema.arena.alloc(u64, struct_type.field_types.len);
var big_align: Alignment = .@"1";
for (aligns, sizes, 0..) |*field_align, *field_size, i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
struct_type.offsets.get(ip)[i] = 0;
field_size.* = 0;
field_align.* = .none;
continue;
}
field_size.* = sema.typeAbiSize(field_ty) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return err;
try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
return err;
},
else => return err,
};
field_align.* = try sema.structFieldAlignment(
struct_type.fieldAlign(ip, i),
field_ty,
struct_type.layout,
);
big_align = big_align.maxStrict(field_align.*);
}
if (struct_type.flagsPtr(ip).assumed_runtime_bits and !(try sema.typeHasRuntimeBits(ty))) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
"struct layout depends on it having runtime bits",
.{},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
if (struct_type.hasReorderedFields()) {
const runtime_order = struct_type.runtime_order.get(ip);
for (runtime_order, 0..) |*ro, i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (struct_type.fieldIsComptime(ip, i) or try sema.typeRequiresComptime(field_ty)) {
ro.* = .omitted;
} else {
ro.* = @enumFromInt(i);
}
}
const RuntimeOrder = InternPool.Key.StructType.RuntimeOrder;
const AlignSortContext = struct {
aligns: []const Alignment,
fn lessThan(ctx: @This(), a: RuntimeOrder, b: RuntimeOrder) bool {
if (a == .omitted) return false;
if (b == .omitted) return true;
const a_align = ctx.aligns[@intFromEnum(a)];
const b_align = ctx.aligns[@intFromEnum(b)];
return a_align.compare(.gt, b_align);
}
};
if (struct_type.isTuple(ip) or !mod.backendSupportsFeature(.field_reordering)) {
// TODO: don't handle tuples differently. This logic exists only because it
// uncovers latent bugs if removed. Fix the latent bugs and remove this logic!
// Likewise, implement field reordering support in all the backends!
// This logic does not reorder fields; it only moves the omitted ones to the end
// so that logic elsewhere does not need to special-case tuples.
var i: usize = 0;
var off: usize = 0;
while (i + off < runtime_order.len) {
if (runtime_order[i + off] == .omitted) {
off += 1;
continue;
}
runtime_order[i] = runtime_order[i + off];
i += 1;
}
@memset(runtime_order[i..], .omitted);
} else {
mem.sortUnstable(RuntimeOrder, runtime_order, AlignSortContext{
.aligns = aligns,
}, AlignSortContext.lessThan);
}
}
// Calculate size, alignment, and field offsets.
const offsets = struct_type.offsets.get(ip);
var it = struct_type.iterateRuntimeOrder(ip);
var offset: u64 = 0;
while (it.next()) |i| {
offsets[i] = @intCast(aligns[i].forward(offset));
offset = offsets[i] + sizes[i];
}
struct_type.size(ip).* = @intCast(big_align.forward(offset));
const flags = struct_type.flagsPtr(ip);
flags.alignment = big_align;
flags.layout_resolved = true;
_ = try sema.typeRequiresComptime(ty);
}
fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
const decl_index = struct_type.decl.unwrap().?;
const decl = mod.declPtr(decl_index);
const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
const zir = mod.namespacePtr(struct_type.namespace.unwrap().?).file_scope.zir;
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
var block: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_type.namespace.unwrap() orelse decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer assert(block.instructions.items.len == 0);
const fields_bit_sum = blk: {
var accumulator: u64 = 0;
for (0..struct_type.field_types.len) |i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
accumulator += try field_ty.bitSizeAdvanced(mod, &sema);
}
break :blk accumulator;
};
const extended = zir.instructions.items(.data)[struct_type.zir_index].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
@@ -34300,40 +34539,6 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1;
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
var block: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_obj.namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
defer assert(block.instructions.items.len == 0);
const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 };
const backing_int_ty = blk: {
if (backing_int_body_len == 0) {
@@ -34341,48 +34546,24 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
break :blk try sema.resolveType(&block, backing_int_src, backing_int_ref);
} else {
const body = zir.extra[extra_index..][0..backing_int_body_len];
const ty_ref = try sema.resolveBody(&block, body, struct_obj.zir_index);
const ty_ref = try sema.resolveBody(&block, body, struct_type.zir_index);
break :blk try sema.analyzeAsType(&block, backing_int_src, ty_ref);
}
};
try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum);
struct_obj.backing_int_ty = backing_int_ty;
for (comptime_mutable_decls.items) |ct_decl_index| {
const ct_decl = mod.declPtr(ct_decl_index);
_ = try ct_decl.internValue(mod);
}
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
} else {
if (fields_bit_sum > std.math.maxInt(u16)) {
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = undefined,
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = undefined,
};
defer sema.deinit();
var block: Block = .{
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_obj.namespace,
.wip_capture_scope = undefined,
.instructions = .{},
.inlining = null,
.is_comptime = true,
};
return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
}
struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum));
struct_type.backingIntType(ip).* = backing_int_ty.toIntern();
}
for (comptime_mutable_decls.items) |ct_decl_index| {
const ct_decl = mod.declPtr(ct_decl_index);
_ = try ct_decl.internValue(mod);
}
}
@@ -34532,30 +34713,20 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void {
try sema.resolveStructLayout(ty);
const mod = sema.mod;
try sema.resolveTypeFields(ty);
const struct_obj = mod.typeToStruct(ty).?;
const ip = &mod.intern_pool;
const struct_type = mod.typeToStruct(ty).?;
switch (struct_obj.status) {
.none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {},
.fully_resolved_wip, .fully_resolved => return,
if (struct_type.setFullyResolved(ip)) return;
errdefer struct_type.clearFullyResolved(ip);
// After we have resolve struct layout we have to go over the fields again to
// make sure pointer fields get their child types resolved as well.
// See also similar code for unions.
for (0..struct_type.field_types.len) |i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
try sema.resolveTypeFully(field_ty);
}
{
// After we have resolve struct layout we have to go over the fields again to
// make sure pointer fields get their child types resolved as well.
// See also similar code for unions.
const prev_status = struct_obj.status;
errdefer struct_obj.status = prev_status;
struct_obj.status = .fully_resolved_wip;
for (struct_obj.fields.values()) |field| {
try sema.resolveTypeFully(field.ty);
}
struct_obj.status = .fully_resolved;
}
// And let's not forget comptime-only status.
_ = try sema.typeRequiresComptime(ty);
}
fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
@@ -34591,8 +34762,10 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void {
pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const ty_ip = ty.toIntern();
switch (ty.toIntern()) {
switch (ty_ip) {
.var_args_param_type => unreachable,
.none => unreachable,
@@ -34673,20 +34846,15 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!void {
.empty_struct => unreachable,
.generic_poison => unreachable,
else => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
else => switch (ip.items.items(.tag)[@intFromEnum(ty_ip)]) {
.type_struct,
.type_struct_ns,
.type_union,
.simple_type,
=> switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return;
try sema.resolveTypeFieldsStruct(ty, struct_obj);
},
.union_type => |union_type| try sema.resolveTypeFieldsUnion(ty, union_type),
.simple_type => |simple_type| try sema.resolveSimpleType(simple_type),
else => unreachable,
},
.type_struct_packed,
.type_struct_packed_inits,
=> try sema.resolveTypeFieldsStruct(ty_ip, ip.indexToKey(ty_ip).struct_type),
.type_union => try sema.resolveTypeFieldsUnion(ty_ip.toType(), ip.indexToKey(ty_ip).union_type),
.simple_type => try sema.resolveSimpleType(ip.indexToKey(ty_ip).simple_type),
else => {},
},
}
@@ -34716,43 +34884,41 @@ fn resolveSimpleType(sema: *Sema, simple_type: InternPool.SimpleType) CompileErr
fn resolveTypeFieldsStruct(
sema: *Sema,
ty: Type,
struct_obj: *Module.Struct,
ty: InternPool.Index,
struct_type: InternPool.Key.StructType,
) CompileError!void {
switch (sema.mod.declPtr(struct_obj.owner_decl).analysis) {
const mod = sema.mod;
const ip = &mod.intern_pool;
// If there is no owner decl it means the struct has no fields.
const owner_decl = struct_type.decl.unwrap() orelse return;
switch (mod.declPtr(owner_decl).analysis) {
.file_failure,
.dependency_failure,
.sema_failure,
.sema_failure_retryable,
=> {
sema.owner_decl.analysis = .dependency_failure;
sema.owner_decl.generation = sema.mod.generation;
sema.owner_decl.generation = mod.generation;
return error.AnalysisFail;
},
else => {},
}
switch (struct_obj.status) {
.none => {},
.field_types_wip => {
const msg = try Module.ErrorMsg.create(
sema.gpa,
struct_obj.srcLoc(sema.mod),
"struct '{}' depends on itself",
.{ty.fmt(sema.mod)},
);
return sema.failWithOwnedErrorMsg(null, msg);
},
.have_field_types,
.have_layout,
.layout_wip,
.fully_resolved_wip,
.fully_resolved,
=> return,
}
struct_obj.status = .field_types_wip;
errdefer struct_obj.status = .none;
try semaStructFields(sema.mod, struct_obj);
if (struct_type.haveFieldTypes(ip)) return;
if (struct_type.setTypesWip(ip)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
mod.declPtr(owner_decl).srcLoc(mod),
"struct '{}' depends on itself",
.{ty.toType().fmt(mod)},
);
return sema.failWithOwnedErrorMsg(null, msg);
}
defer struct_type.clearTypesWip(ip);
try semaStructFields(mod, sema.arena, struct_type);
}
fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Key.UnionType) CompileError!void {
@@ -34936,12 +35102,19 @@ fn resolveInferredErrorSetTy(
}
}
fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
fn semaStructFields(
mod: *Module,
arena: Allocator,
struct_type: InternPool.Key.StructType,
) CompileError!void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
const decl_index = struct_obj.owner_decl;
const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir;
const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended;
const decl_index = struct_type.decl.unwrap() orelse return;
const decl = mod.declPtr(decl_index);
const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace;
const zir = mod.namespacePtr(namespace_index).file_scope.zir;
const zir_index = struct_type.zir_index;
const extended = zir.instructions.items(.data)[zir_index].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand;
@@ -34977,18 +35150,16 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
while (decls_it.next()) |_| {}
extra_index = decls_it.extra_index;
if (fields_len == 0) {
if (struct_obj.layout == .Packed) {
try semaBackingIntType(mod, struct_obj);
}
struct_obj.status = .have_layout;
return;
}
const decl = mod.declPtr(decl_index);
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
if (fields_len == 0) switch (struct_type.layout) {
.Packed => {
try semaBackingIntType(mod, struct_type);
return;
},
.Auto, .Extern => {
struct_type.flagsPtr(ip).layout_resolved = true;
return;
},
};
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
@@ -34996,7 +35167,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = analysis_arena.allocator(),
.arena = arena,
.code = zir,
.owner_decl = decl,
.owner_decl_index = decl_index,
@@ -35013,7 +35184,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.parent = null,
.sema = &sema,
.src_decl = decl_index,
.namespace = struct_obj.namespace,
.namespace = namespace_index,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
@@ -35021,9 +35192,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
};
defer assert(block_scope.instructions.items.len == 0);
struct_obj.fields = .{};
try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
const Field = struct {
type_body_len: u32 = 0,
align_body_len: u32 = 0,
@@ -35031,7 +35199,9 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
type_ref: Zir.Inst.Ref = .none,
};
const fields = try sema.arena.alloc(Field, fields_len);
var any_inits = false;
var any_aligned = false;
{
const bits_per_field = 4;
@@ -35056,9 +35226,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
cur_bit_bag >>= 1;
var field_name_zir: ?[:0]const u8 = null;
if (is_comptime) struct_type.setFieldComptime(ip, field_i);
var opt_field_name_zir: ?[:0]const u8 = null;
if (!small.is_tuple) {
field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
opt_field_name_zir = zir.nullTerminatedString(zir.extra[extra_index]);
extra_index += 1;
}
extra_index += 1; // doc_comment
@@ -35073,37 +35245,27 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
extra_index += 1;
// This string needs to outlive the ZIR code.
const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s|
s
else
try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}));
if (opt_field_name_zir) |field_name_zir| {
const field_name = try ip.getOrPutString(gpa, field_name_zir);
if (struct_type.addFieldName(ip, field_name)) |other_index| {
const msg = msg: {
const field_src = mod.fieldSrcLoc(decl_index, .{ .index = field_i }).lazy;
const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)});
errdefer msg.destroy(gpa);
const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
const msg = msg: {
const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy;
const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)});
errdefer msg.destroy(gpa);
const prev_field_index = struct_obj.fields.getIndex(field_name).?;
const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index });
try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
const prev_field_src = mod.fieldSrcLoc(decl_index, .{ .index = other_index });
try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{});
try sema.errNote(&block_scope, src, msg, "struct declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
}
}
gop.value_ptr.* = .{
.ty = Type.noreturn,
.abi_align = .none,
.default_val = .none,
.is_comptime = is_comptime,
.offset = undefined,
};
if (has_align) {
fields[field_i].align_body_len = zir.extra[extra_index];
extra_index += 1;
any_aligned = true;
}
if (has_init) {
fields[field_i].init_body_len = zir.extra[extra_index];
@@ -35122,7 +35284,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
if (zir_field.type_ref != .none) {
break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .type,
}).lazy;
@@ -35135,10 +35297,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
assert(zir_field.type_body_len != 0);
const body = zir.extra[extra_index..][0..zir_field.type_body_len];
extra_index += body.len;
const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
const ty_ref = try sema.resolveBody(&block_scope, body, zir_index);
break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .type,
}).lazy;
@@ -35152,12 +35314,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
return error.GenericPoison;
}
const field = &struct_obj.fields.values()[field_i];
field.ty = field_ty;
struct_type.field_types.get(ip)[field_i] = field_ty.toIntern();
if (field_ty.zigTypeTag(mod) == .Opaque) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .type,
}).lazy;
@@ -35171,7 +35332,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
}
if (field_ty.zigTypeTag(mod) == .NoReturn) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .type,
}).lazy;
@@ -35183,45 +35344,49 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
}
if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
switch (struct_type.layout) {
.Extern => if (!try sema.validateExternType(field_ty, .struct_field)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .struct_field);
try sema.addDeclaredHereNote(msg, field.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
} else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
},
.Packed => if (!validatePackedType(field_ty, mod)) {
const msg = msg: {
const ty_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .type,
});
const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty);
try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty);
try sema.addDeclaredHereNote(msg, field.ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(&block_scope, msg);
},
else => {},
}
if (zir_field.align_body_len > 0) {
const body = zir.extra[extra_index..][0..zir_field.align_body_len];
extra_index += body.len;
const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
const align_ref = try sema.resolveBody(&block_scope, body, zir_index);
const field_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) {
error.NeededSourceLocation => {
const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
const align_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .alignment,
}).lazy;
@@ -35230,36 +35395,38 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
},
else => |e| return e,
};
struct_type.field_aligns.get(ip)[field_i] = field_align;
}
extra_index += zir_field.init_body_len;
}
struct_obj.status = .have_field_types;
// TODO: there seems to be no mechanism to catch when an init depends on
// another init that hasn't been resolved.
if (any_inits) {
extra_index = bodies_index;
for (fields, 0..) |zir_field, field_i| {
const field_ty = struct_type.field_types.get(ip)[field_i].toType();
extra_index += zir_field.type_body_len;
extra_index += zir_field.align_body_len;
if (zir_field.init_body_len > 0) {
const body = zir.extra[extra_index..][0..zir_field.init_body_len];
extra_index += body.len;
const init = try sema.resolveBody(&block_scope, body, struct_obj.zir_index);
const field = &struct_obj.fields.values()[field_i];
const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) {
const init = try sema.resolveBody(&block_scope, body, zir_index);
const coerced = sema.coerce(&block_scope, field_ty, init, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
const init_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .value,
}).lazy;
_ = try sema.coerce(&block_scope, field.ty, init, init_src);
_ = try sema.coerce(&block_scope, field_ty, init, init_src);
unreachable;
},
else => |e| return e,
};
const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{
const init_src = mod.fieldSrcLoc(decl_index, .{
.index = field_i,
.range = .value,
}).lazy;
@@ -35267,7 +35434,8 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.needed_comptime_reason = "struct field default value must be comptime-known",
});
};
field.default_val = try default_val.intern(field.ty, mod);
const field_init = try default_val.intern(field_ty, mod);
struct_type.field_inits.get(ip)[field_i] = field_init;
}
}
}
@@ -35275,8 +35443,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
const ct_decl = mod.declPtr(ct_decl_index);
_ = try ct_decl.internValue(mod);
}
struct_obj.have_field_inits = true;
}
fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.UnionType) CompileError!void {
@@ -36060,6 +36226,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.type_struct,
.type_struct_ns,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
.type_union,
=> switch (ip.indexToKey(ty.toIntern())) {
@@ -36081,41 +36249,46 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.struct_type => |struct_type| {
try sema.resolveTypeFields(ty);
if (mod.structPtrUnwrap(struct_type.index)) |s| {
const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count());
for (field_vals, s.fields.values(), 0..) |*field_val, field, i| {
if (field.is_comptime) {
field_val.* = field.default_val;
continue;
}
if (field.ty.eql(ty, mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
s.srcLoc(mod),
"struct '{}' depends on itself",
.{ty.fmt(mod)},
);
try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
return sema.failWithOwnedErrorMsg(null, msg);
}
if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| {
field_val.* = try field_opv.intern(field.ty, mod);
} else return null;
}
// In this case the struct has no runtime-known fields and
if (struct_type.field_types.len == 0) {
// In this case the struct has no fields at all and
// therefore has one possible value.
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
.storage = .{ .elems = &.{} },
} })).toValue();
}
// In this case the struct has no fields at all and
const field_vals = try sema.arena.alloc(
InternPool.Index,
struct_type.field_types.len,
);
for (field_vals, 0..) |*field_val, i| {
if (struct_type.fieldIsComptime(ip, i)) {
field_val.* = struct_type.field_inits.get(ip)[i];
continue;
}
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (field_ty.eql(ty, mod)) {
const msg = try Module.ErrorMsg.create(
sema.gpa,
mod.declPtr(struct_type.decl.unwrap().?).srcLoc(mod),
"struct '{}' depends on itself",
.{ty.fmt(mod)},
);
try sema.addFieldErrNote(ty, i, msg, "while checking this field", .{});
return sema.failWithOwnedErrorMsg(null, msg);
}
if (try sema.typeHasOnePossibleValue(field_ty)) |field_opv| {
field_val.* = try field_opv.intern(field_ty, mod);
} else return null;
}
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
.storage = .{ .elems = field_vals },
} })).toValue();
},
@@ -36266,7 +36439,7 @@ fn analyzeComptimeAlloc(
// Needed to make an anon decl with type `var_type` (the `finish()` call below).
_ = try sema.typeHasOnePossibleValue(var_type);
const ptr_type = try mod.ptrType(.{
const ptr_type = try sema.ptrType(.{
.child = var_type.toIntern(),
.flags = .{
.alignment = alignment,
@@ -36574,25 +36747,36 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
=> true,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
switch (struct_obj.requires_comptime) {
if (struct_type.layout == .Packed) {
// packed structs cannot be comptime-only because they have a well-defined
// memory layout and every field has a well-defined bit pattern.
return false;
}
switch (struct_type.flagsPtr(ip).requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
if (struct_obj.status == .field_types_wip)
if (struct_type.flagsPtr(ip).field_types_wip)
return false;
try sema.resolveTypeFieldsStruct(ty, struct_obj);
try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
struct_obj.requires_comptime = .wip;
for (struct_obj.fields.values()) |field| {
if (field.is_comptime) continue;
if (try sema.typeRequiresComptime(field.ty)) {
struct_obj.requires_comptime = .yes;
struct_type.flagsPtr(ip).requires_comptime = .wip;
for (0..struct_type.field_types.len) |i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i];
if (try sema.typeRequiresComptime(field_ty.toType())) {
// Note that this does not cause the layout to
// be considered resolved. Comptime-only types
// still maintain a layout of their
// runtime-known fields.
struct_type.flagsPtr(ip).requires_comptime = .yes;
return true;
}
}
struct_obj.requires_comptime = .no;
struct_type.flagsPtr(ip).requires_comptime = .no;
return false;
},
}
@@ -36673,40 +36857,41 @@ fn typeAbiSize(sema: *Sema, ty: Type) !u64 {
return ty.abiSize(sema.mod);
}
fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 {
fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!Alignment {
return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar;
}
/// Not valid to call for packed unions.
/// Keep implementation in sync with `Module.unionFieldNormalAlignment`.
/// TODO: this returns alignment in byte units should should be a u64
fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !u32 {
fn unionFieldAlignment(sema: *Sema, u: InternPool.UnionType, field_index: u32) !Alignment {
const mod = sema.mod;
const ip = &mod.intern_pool;
if (u.fieldAlign(ip, field_index).toByteUnitsOptional()) |a| return @intCast(a);
const field_align = u.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
const field_ty = u.field_types.get(ip)[field_index].toType();
if (field_ty.isNoReturn(sema.mod)) return 0;
return @intCast(try sema.typeAbiAlignment(field_ty));
if (field_ty.isNoReturn(sema.mod)) return .none;
return sema.typeAbiAlignment(field_ty);
}
/// Keep implementation in sync with `Module.Struct.Field.alignment`.
fn structFieldAlignment(sema: *Sema, field: Module.Struct.Field, layout: std.builtin.Type.ContainerLayout) !u32 {
/// Keep implementation in sync with `Module.structFieldAlignment`.
fn structFieldAlignment(
sema: *Sema,
explicit_alignment: InternPool.Alignment,
field_ty: Type,
layout: std.builtin.Type.ContainerLayout,
) !Alignment {
if (explicit_alignment != .none)
return explicit_alignment;
const mod = sema.mod;
if (field.abi_align.toByteUnitsOptional()) |a| {
assert(layout != .Packed);
return @intCast(a);
}
switch (layout) {
.Packed => return 0,
.Auto => if (mod.getTarget().ofmt != .c) {
return sema.typeAbiAlignment(field.ty);
},
.Packed => return .none,
.Auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
.Extern => {},
}
// extern
const ty_abi_align = try sema.typeAbiAlignment(field.ty);
if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
return @max(ty_abi_align, 16);
const ty_abi_align = try sema.typeAbiAlignment(field_ty);
if (field_ty.isAbiInt(mod) and field_ty.intInfo(mod).bits >= 128) {
return ty_abi_align.maxStrict(.@"16");
}
return ty_abi_align;
}
@@ -36752,14 +36937,14 @@ fn structFieldIndex(
field_src: LazySrcLoc,
) !u32 {
const mod = sema.mod;
const ip = &mod.intern_pool;
try sema.resolveTypeFields(struct_ty);
if (struct_ty.isAnonStruct(mod)) {
return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src);
} else {
const struct_obj = mod.typeToStruct(struct_ty).?;
const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
return @intCast(field_index_usize);
const struct_type = mod.typeToStruct(struct_ty).?;
return struct_type.nameIndex(ip, field_name) orelse
return sema.failWithBadStructFieldAccess(block, struct_type, field_src, field_name);
}
}
@@ -36776,13 +36961,7 @@ fn anonStructFieldIndex(
.anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| {
if (name == field_name) return @intCast(i);
},
.struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
for (struct_obj.fields.keys(), 0..) |name, i| {
if (name == field_name) {
return @intCast(i);
}
}
},
.struct_type => |struct_type| if (struct_type.nameIndex(ip, field_name)) |i| return i,
else => unreachable,
}
return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{
@@ -37167,8 +37346,8 @@ fn intFitsInType(
// If it is u16 or bigger we know the alignment fits without resolving it.
if (info.bits >= max_needed_bits) return true;
const x = try sema.typeAbiAlignment(lazy_ty.toType());
if (x == 0) return true;
const actual_needed_bits = std.math.log2(x) + 1 + @intFromBool(info.signedness == .signed);
if (x == .none) return true;
const actual_needed_bits = @as(usize, x.toLog2Units()) + 1 + @intFromBool(info.signedness == .signed);
return info.bits >= actual_needed_bits;
},
.lazy_size => |lazy_ty| {
@@ -37381,7 +37560,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
const vector_info: struct {
host_size: u16 = 0,
alignment: u32 = 0,
alignment: Alignment = .none,
vector_index: VI = .none,
} = if (parent_ty.isVector(mod) and ptr_info.flags.size == .One) blk: {
const elem_bits = elem_ty.bitSize(mod);
@@ -37391,7 +37570,7 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
break :blk .{
.host_size = @intCast(parent_ty.arrayLen(mod)),
.alignment = @intCast(parent_ty.abiAlignment(mod)),
.alignment = parent_ty.abiAlignment(mod),
.vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
};
} else .{};
@@ -37399,9 +37578,9 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
const alignment: Alignment = a: {
// Calculate the new pointer alignment.
if (ptr_info.flags.alignment == .none) {
if (vector_info.alignment != 0) break :a Alignment.fromNonzeroByteUnits(vector_info.alignment);
// ABI-aligned pointer. Any pointer arithmetic maintains the same ABI-alignedness.
break :a .none;
// In case of an ABI-aligned pointer, any pointer arithmetic
// maintains the same ABI-alignedness.
break :a vector_info.alignment;
}
// If the addend is not a comptime-known value we can still count on
// it being a multiple of the type size.
@@ -37413,12 +37592,12 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
// non zero).
const new_align: Alignment = @enumFromInt(@min(
@ctz(addend),
@intFromEnum(ptr_info.flags.alignment),
ptr_info.flags.alignment.toLog2Units(),
));
assert(new_align != .none);
break :a new_align;
};
return mod.ptrType(.{
return sema.ptrType(.{
.child = elem_ty.toIntern(),
.flags = .{
.alignment = alignment,
@@ -37473,3 +37652,10 @@ fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool
};
return sema.typeOf(ref).zigTypeTag(sema.mod) == tag;
}
fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
if (info.flags.alignment != .none) {
_ = try sema.typeAbiAlignment(info.child.toType());
}
return sema.mod.ptrType(info);
}
+13 -8
View File
@@ -135,9 +135,10 @@ pub fn print(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
const maybe_elem_val = payload.ptr.maybeElemValue(mod, i) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
};
const elem_val = maybe_elem_val orelse return writer.writeAll(".{ (reinterpreted data) }");
if (elem_val.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
}
@@ -153,9 +154,10 @@ pub fn print(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
const maybe_elem_val = payload.ptr.maybeElemValue(mod, i) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
};
const elem_val = maybe_elem_val orelse return writer.writeAll("(reinterpreted data) }");
try print(.{
.ty = elem_ty,
.val = elem_val,
@@ -272,7 +274,8 @@ pub fn print(
const max_len = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
for (buf[0..max_len], 0..) |*c, i| {
const elem = try val.elemValue(mod, i);
const maybe_elem = try val.maybeElemValue(mod, i);
const elem = maybe_elem orelse return writer.writeAll(".{ (reinterpreted data) }");
if (elem.isUndef(mod)) break :str;
c.* = @as(u8, @intCast(elem.toUnsignedInt(mod)));
}
@@ -283,9 +286,11 @@ pub fn print(
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
const maybe_elem = try val.maybeElemValue(mod, i);
const elem = maybe_elem orelse return writer.writeAll("(reinterpreted data) }");
try print(.{
.ty = elem_ty,
.val = try val.elemValue(mod, i),
.val = elem,
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
@@ -350,11 +355,11 @@ pub fn print(
const container_ty = ptr_container_ty.childType(mod);
switch (container_ty.zigTypeTag(mod)) {
.Struct => {
if (container_ty.isTuple(mod)) {
if (container_ty.structFieldName(@intCast(field.index), mod).unwrap()) |field_name| {
try writer.print(".{i}", .{field_name.fmt(ip)});
} else {
try writer.print("[{d}]", .{field.index});
}
const field_name = container_ty.structFieldName(@as(usize, @intCast(field.index)), mod);
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Union => {
const field_name = mod.typeToUnion(container_ty).?.field_names.get(ip)[@intCast(field.index)];
@@ -432,7 +437,7 @@ fn printAggregate(
if (i != 0) try writer.writeAll(", ");
const field_name = switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(),
.struct_type => |x| x.fieldName(ip, i),
.anon_struct_type => |x| if (x.isTuple()) .none else x.names.get(ip)[i].toOptional(),
else => unreachable,
};
+4 -1
View File
@@ -2840,7 +2840,10 @@ pub const Inst = struct {
is_tuple: bool,
name_strategy: NameStrategy,
layout: std.builtin.Type.ContainerLayout,
_: u5 = undefined,
any_default_inits: bool,
any_comptime_fields: bool,
any_aligned_fields: bool,
_: u2 = undefined,
};
};
+20 -27
View File
@@ -23,6 +23,7 @@ const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const Alignment = InternPool.Alignment;
const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
@@ -506,11 +507,9 @@ fn gen(self: *Self) !void {
// (or w0 when pointer size is 32 bits). As this register
// might get overwritten along the way, save the address
// to the stack.
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
const stack_offset = try self.allocMem(ptr_bytes, ptr_bytes, null);
const stack_offset = try self.allocMem(8, .@"8", null);
try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = ret_ptr_reg });
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
@@ -998,11 +997,11 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
fn allocMem(
self: *Self,
abi_size: u32,
abi_align: u32,
abi_align: Alignment,
maybe_inst: ?Air.Inst.Index,
) !u32 {
assert(abi_size > 0);
assert(abi_align > 0);
assert(abi_align != .none);
// In order to efficiently load and store stack items that fit
// into registers, we bump up the alignment to the next power of
@@ -1010,10 +1009,10 @@ fn allocMem(
const adjusted_align = if (abi_size > 8)
abi_align
else
std.math.ceilPowerOfTwoAssert(u32, abi_size);
Alignment.fromNonzeroByteUnits(std.math.ceilPowerOfTwoAssert(u64, abi_size));
// TODO find a free slot instead of always appending
const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size;
const offset: u32 = @intCast(adjusted_align.forward(self.next_stack_offset) + abi_size);
self.next_stack_offset = offset;
self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
@@ -1515,12 +1514,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const len = try self.resolveInst(bin_op.rhs);
const len_ty = self.typeOf(bin_op.rhs);
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
const stack_offset = try self.allocMem(16, .@"8", inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(len_ty, stack_offset - ptr_bytes, len);
try self.genSetStack(len_ty, stack_offset - 8, len);
break :result MCValue{ .stack_offset = stack_offset };
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -3285,9 +3281,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue{ .register = reg };
}
const optional_abi_size = @as(u32, @intCast(optional_ty.abiSize(mod)));
const optional_abi_size: u32 = @intCast(optional_ty.abiSize(mod));
const optional_abi_align = optional_ty.abiAlignment(mod);
const offset = @as(u32, @intCast(payload_ty.abiSize(mod)));
const offset: u32 = @intCast(payload_ty.abiSize(mod));
const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3376,7 +3372,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = self.target.ptrBitWidth();
const ptr_bits = 64;
const ptr_bytes = @divExact(ptr_bits, 8);
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
@@ -3400,7 +3396,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = self.target.ptrBitWidth();
const ptr_bits = 64;
const ptr_bytes = @divExact(ptr_bits, 8);
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
@@ -4272,8 +4268,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.return_value == .stack_offset) {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType(mod);
const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
const ret_abi_align = ret_ty.abiAlignment(mod);
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -5939,11 +5935,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType(mod);
const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
const ptr_bytes = 8;
const stack_offset = try self.allocMem(ptr_bytes * 2, .@"8", inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len });
break :result MCValue{ .stack_offset = stack_offset };
@@ -6254,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
// values to spread across odd-numbered registers.
if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) {
if (ty.toType().abiAlignment(mod) == .@"16" and !self.target.isDarwin()) {
// Round up NCRN to the next even number
ncrn += ncrn % 2;
}
@@ -6272,7 +6265,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
ncrn = 8;
// TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
// that the entire stack space consumed by the arguments is 8-byte aligned.
if (ty.toType().abiAlignment(mod) == 8) {
if (ty.toType().abiAlignment(mod) == .@"8") {
if (nsaa % 8 != 0) {
nsaa += 8 - (nsaa % 8);
}
@@ -6312,10 +6305,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
if (ty.toType().abiSize(mod) > 0) {
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
const param_size: u32 = @intCast(ty.toType().abiSize(mod));
const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
stack_offset = @intCast(param_alignment.forward(stack_offset));
result_arg.* = .{ .stack_argument_offset = stack_offset };
stack_offset += param_size;
} else {
+13 -12
View File
@@ -23,6 +23,7 @@ const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const Alignment = InternPool.Alignment;
const Result = codegen.Result;
const CodeGenError = codegen.CodeGenError;
@@ -508,7 +509,7 @@ fn gen(self: *Self) !void {
// The address of where to store the return value is in
// r0. As this register might get overwritten along the
// way, save the address to the stack.
const stack_offset = try self.allocMem(4, 4, null);
const stack_offset = try self.allocMem(4, .@"4", null);
try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = .r0 });
self.ret_mcv = MCValue{ .stack_offset = stack_offset };
@@ -986,14 +987,14 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
fn allocMem(
self: *Self,
abi_size: u32,
abi_align: u32,
abi_align: Alignment,
maybe_inst: ?Air.Inst.Index,
) !u32 {
assert(abi_size > 0);
assert(abi_align > 0);
assert(abi_align != .none);
// TODO find a free slot instead of always appending
const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset) + abi_size);
self.next_stack_offset = offset;
self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset);
@@ -1490,7 +1491,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const len = try self.resolveInst(bin_op.rhs);
const len_ty = self.typeOf(bin_op.rhs);
const stack_offset = try self.allocMem(8, 4, inst);
const stack_offset = try self.allocMem(8, .@"4", inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(len_ty, stack_offset - 4, len);
break :result MCValue{ .stack_offset = stack_offset };
@@ -4251,8 +4252,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
log.debug("airCall: return by reference", .{});
const ret_ty = fn_ty.fnReturnType(mod);
const ret_abi_size = @as(u32, @intCast(ret_ty.abiSize(mod)));
const ret_abi_align = @as(u32, @intCast(ret_ty.abiAlignment(mod)));
const ret_abi_size: u32 = @intCast(ret_ty.abiSize(mod));
const ret_abi_align = ret_ty.abiAlignment(mod);
const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
const ptr_ty = try mod.singleMutPtrType(ret_ty);
@@ -5896,7 +5897,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const array_ty = ptr_ty.childType(mod);
const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
const stack_offset = try self.allocMem(8, 8, inst);
const stack_offset = try self.allocMem(8, .@"8", inst);
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len });
break :result MCValue{ .stack_offset = stack_offset };
@@ -6201,7 +6202,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
if (ty.toType().abiAlignment(mod) == 8)
if (ty.toType().abiAlignment(mod) == .@"8")
ncrn = std.mem.alignForward(usize, ncrn, 2);
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
@@ -6216,7 +6217,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
return self.fail("TODO MCValues split between registers and stack", .{});
} else {
ncrn = 4;
if (ty.toType().abiAlignment(mod) == 8)
if (ty.toType().abiAlignment(mod) == .@"8")
nsaa = std.mem.alignForward(u32, nsaa, 8);
result_arg.* = .{ .stack_argument_offset = nsaa };
@@ -6252,10 +6253,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
if (ty.toType().abiSize(mod) > 0) {
const param_size = @as(u32, @intCast(ty.toType().abiSize(mod)));
const param_size: u32 = @intCast(ty.toType().abiSize(mod));
const param_alignment = ty.toType().abiAlignment(mod);
stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment);
stack_offset = @intCast(param_alignment.forward(stack_offset));
result_arg.* = .{ .stack_argument_offset = stack_offset };
stack_offset += param_size;
} else {
+2 -2
View File
@@ -47,7 +47,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
const field_ty = ty.structFieldType(i, mod);
const field_alignment = ty.structFieldAlign(i, mod);
const field_size = field_ty.bitSize(mod);
if (field_size > 32 or field_alignment > 32) {
if (field_size > 32 or field_alignment.compare(.gt, .@"32")) {
return Class.arrSize(bit_size, 64);
}
}
@@ -66,7 +66,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
if (field_ty.toType().bitSize(mod) > 32 or
mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)) > 32)
mod.unionFieldNormalAlignment(union_obj, @intCast(field_index)).compare(.gt, .@"32"))
{
return Class.arrSize(bit_size, 64);
}
+9 -10
View File
@@ -23,6 +23,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const codegen = @import("../../codegen.zig");
const Alignment = InternPool.Alignment;
const CodeGenError = codegen.CodeGenError;
const Result = codegen.Result;
@@ -53,7 +54,7 @@ ret_mcv: MCValue,
fn_type: Type,
arg_index: usize,
src_loc: Module.SrcLoc,
stack_align: u32,
stack_align: Alignment,
/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
@@ -788,11 +789,10 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
try table.ensureUnusedCapacity(self.gpa, additional_count);
}
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
if (abi_align > self.stack_align)
self.stack_align = abi_align;
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 {
self.stack_align = self.stack_align.max(abi_align);
// TODO find a free slot instead of always appending
const offset = mem.alignForward(u32, self.next_stack_offset, abi_align);
const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset));
self.next_stack_offset = offset + abi_size;
if (self.next_stack_offset > self.max_end_stack)
self.max_end_stack = self.next_stack_offset;
@@ -822,8 +822,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(mod);
if (abi_align > self.stack_align)
self.stack_align = abi_align;
self.stack_align = self.stack_align.max(abi_align);
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
@@ -2602,7 +2601,7 @@ const CallMCValues = struct {
args: []MCValue,
return_value: MCValue,
stack_byte_count: u32,
stack_align: u32,
stack_align: Alignment,
fn deinit(self: *CallMCValues, func: *Self) void {
func.gpa.free(self.args);
@@ -2632,7 +2631,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
assert(result.args.len == 0);
result.return_value = .{ .unreach = {} };
result.stack_byte_count = 0;
result.stack_align = 1;
result.stack_align = .@"1";
return result;
},
.Unspecified, .C => {
@@ -2671,7 +2670,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
}
result.stack_byte_count = next_stack_offset;
result.stack_align = 16;
result.stack_align = .@"16";
},
else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}),
}
+14 -21
View File
@@ -24,6 +24,7 @@ const CodeGenError = codegen.CodeGenError;
const Result = @import("../../codegen.zig").Result;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const Endian = std.builtin.Endian;
const Alignment = InternPool.Alignment;
const build_options = @import("build_options");
@@ -62,7 +63,7 @@ ret_mcv: MCValue,
fn_type: Type,
arg_index: usize,
src_loc: Module.SrcLoc,
stack_align: u32,
stack_align: Alignment,
/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
@@ -227,7 +228,7 @@ const CallMCValues = struct {
args: []MCValue,
return_value: MCValue,
stack_byte_count: u32,
stack_align: u32,
stack_align: Alignment,
fn deinit(self: *CallMCValues, func: *Self) void {
func.gpa.free(self.args);
@@ -424,7 +425,7 @@ fn gen(self: *Self) !void {
// Backpatch stack offset
const total_stack_size = self.max_end_stack + abi.stack_reserved_area;
const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align);
const stack_size = self.stack_align.forward(total_stack_size);
if (math.cast(i13, stack_size)) |size| {
self.mir_instructions.set(save_inst, .{
.tag = .save,
@@ -880,11 +881,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType(mod);
const array_len = @as(u32, @intCast(array_ty.arrayLen(mod)));
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
const ptr_bytes = 8;
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len });
break :result MCValue{ .stack_offset = stack_offset };
@@ -2438,11 +2436,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.typeOf(bin_op.lhs);
const len = try self.resolveInst(bin_op.rhs);
const len_ty = self.typeOf(bin_op.rhs);
const ptr_bits = self.target.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
const ptr_bytes = 8;
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, .@"8");
try self.genSetStack(ptr_ty, stack_offset, ptr);
try self.genSetStack(len_ty, stack_offset - ptr_bytes, len);
break :result MCValue{ .stack_offset = stack_offset };
@@ -2782,11 +2777,10 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
return result_index;
}
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 {
if (abi_align > self.stack_align)
self.stack_align = abi_align;
fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: Alignment) !u32 {
self.stack_align = self.stack_align.max(abi_align);
// TODO find a free slot instead of always appending
const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size;
const offset: u32 = @intCast(abi_align.forward(self.next_stack_offset) + abi_size);
self.next_stack_offset = offset;
if (self.next_stack_offset > self.max_end_stack)
self.max_end_stack = self.next_stack_offset;
@@ -2825,8 +2819,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(mod);
if (abi_align > self.stack_align)
self.stack_align = abi_align;
self.stack_align = self.stack_align.max(abi_align);
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
@@ -4479,7 +4472,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
assert(result.args.len == 0);
result.return_value = .{ .unreach = {} };
result.stack_byte_count = 0;
result.stack_align = 1;
result.stack_align = .@"1";
return result;
},
.Unspecified, .C => {
@@ -4521,7 +4514,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
}
result.stack_byte_count = next_stack_offset;
result.stack_align = 16;
result.stack_align = .@"16";
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
+88 -75
View File
@@ -25,6 +25,7 @@ const target_util = @import("../../target.zig");
const Mir = @import("Mir.zig");
const Emit = @import("Emit.zig");
const abi = @import("abi.zig");
const Alignment = InternPool.Alignment;
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;
@@ -709,7 +710,7 @@ stack_size: u32 = 0,
/// tool-conventions: https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
/// and also what the llvm backend will emit.
/// However, local variables or the usage of `@setAlignStack` can overwrite this default.
stack_alignment: u32 = 16,
stack_alignment: Alignment = .@"16",
// For each individual Wasm valtype we store a seperate free list which
// allows us to re-use locals that are no longer used. e.g. a temporary local.
@@ -991,6 +992,7 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32
/// Using a given `Type`, returns the corresponding type
fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
const target = mod.getTarget();
const ip = &mod.intern_pool;
return switch (ty.zigTypeTag(mod)) {
.Float => switch (ty.floatBits(target)) {
16 => wasm.Valtype.i32, // stored/loaded as u16
@@ -1005,12 +1007,12 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64;
break :blk wasm.Valtype.i32; // represented as pointer to stack
},
.Struct => switch (ty.containerLayout(mod)) {
.Packed => {
const struct_obj = mod.typeToStruct(ty).?;
return typeToValtype(struct_obj.backing_int_ty, mod);
},
else => wasm.Valtype.i32,
.Struct => {
if (mod.typeToPackedStruct(ty)) |packed_struct| {
return typeToValtype(packed_struct.backingIntType(ip).toType(), mod);
} else {
return wasm.Valtype.i32;
}
},
.Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.direct => wasm.Valtype.v128,
@@ -1285,12 +1287,12 @@ fn genFunc(func: *CodeGen) InnerError!void {
// store stack pointer so we can restore it when we return from the function
try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
// get the total stack size
const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment);
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(aligned_stack)) } });
// substract it from the current stack pointer
const aligned_stack = func.stack_alignment.forward(func.stack_size);
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(aligned_stack) } });
// subtract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
// Get negative stack aligment
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment)) * -1 } });
try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnitsOptional().?)) * -1 } });
// Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
// store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
@@ -1438,7 +1440,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value:
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = value.offset(),
.alignment = scalar_type.abiAlignment(mod),
.alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
});
}
},
@@ -1527,11 +1529,9 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
};
const abi_align = ty.abiAlignment(mod);
if (abi_align > func.stack_alignment) {
func.stack_alignment = abi_align;
}
func.stack_alignment = func.stack_alignment.max(abi_align);
const offset = std.mem.alignForward(u32, func.stack_size, abi_align);
const offset: u32 = @intCast(abi_align.forward(func.stack_size));
defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@@ -1560,11 +1560,9 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
pointee_ty.fmt(mod), pointee_ty.abiSize(mod),
});
};
if (abi_alignment > func.stack_alignment) {
func.stack_alignment = abi_alignment;
}
func.stack_alignment = func.stack_alignment.max(abi_alignment);
const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment);
const offset: u32 = @intCast(abi_alignment.forward(func.stack_size));
defer func.stack_size = offset + abi_size;
return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
@@ -1749,10 +1747,8 @@ fn isByRef(ty: Type, mod: *Module) bool {
return ty.hasRuntimeBitsIgnoreComptime(mod);
},
.Struct => {
if (mod.typeToStruct(ty)) |struct_obj| {
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
return isByRef(struct_obj.backing_int_ty, mod);
}
if (mod.typeToPackedStruct(ty)) |packed_struct| {
return isByRef(packed_struct.backingIntType(ip).toType(), mod);
}
return ty.hasRuntimeBitsIgnoreComptime(mod);
},
@@ -2120,7 +2116,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
.alignment = scalar_type.abiAlignment(mod),
.alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
});
},
else => try func.emitWValue(operand),
@@ -2385,19 +2381,19 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
},
.Vector => switch (determineSimdStoreStrategy(ty, mod)) {
.unrolled => {
const len = @as(u32, @intCast(abi_size));
const len: u32 = @intCast(abi_size);
return func.memcpy(lhs, rhs, .{ .imm32 = len });
},
.direct => {
try func.emitWValue(lhs);
try func.lowerToStack(rhs);
// TODO: Add helper functions for simd opcodes
const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
const extra_index: u32 = @intCast(func.mir_extra.items.len);
// stores as := opcode, offset, alignment (opcode::memarg)
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
ty.abiAlignment(mod),
@intCast(ty.abiAlignment(mod).toByteUnits(0)),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
@@ -2451,7 +2447,10 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
// store rhs value at stack pointer's location in memory
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
.{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) },
.{
.offset = offset + lhs.offset(),
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
},
);
}
@@ -2510,7 +2509,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
ty.abiAlignment(mod),
@intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
@@ -2526,7 +2525,10 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.addMemArg(
Mir.Inst.Tag.fromOpcode(opcode),
.{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) },
.{
.offset = offset + operand.offset(),
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
},
);
return WValue{ .stack = {} };
@@ -3023,10 +3025,10 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
else => blk: {
const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
if (layout.payload_size == 0) break :blk 0;
if (layout.payload_align > layout.tag_align) break :blk 0;
if (layout.payload_align.compare(.gt, layout.tag_align)) break :blk 0;
// tag is stored first so calculate offset from where payload starts
break :blk @as(u32, @intCast(std.mem.alignForward(u64, layout.tag_size, layout.tag_align)));
break :blk layout.tag_align.forward(layout.tag_size);
},
},
.Pointer => switch (parent_ty.ptrSize(mod)) {
@@ -3103,8 +3105,12 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
return @as(WantedT, @intCast(result));
}
/// This function is intended to assert that `isByRef` returns `false` for `ty`.
/// However such an assertion fails on the behavior tests currently.
fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
// TODO: enable this assertion
//assert(!isByRef(ty, mod));
const ip = &mod.intern_pool;
var val = arg_val;
switch (ip.indexToKey(val.ip_index)) {
@@ -3235,16 +3241,18 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
val.writeToMemory(ty, mod, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
.struct_type, .anon_struct_type => {
const struct_obj = mod.typeToStruct(ty).?;
assert(struct_obj.layout == .Packed);
.struct_type => |struct_type| {
// non-packed structs are not handled in this function because they
// are by-ref types.
assert(struct_type.layout == .Packed);
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
const backing_int_ty = struct_type.backingIntType(ip).toType();
const int_val = try mod.intValue(
struct_obj.backing_int_ty,
std.mem.readIntLittle(u64, &buf),
backing_int_ty,
mem.readIntLittle(u64, &buf),
);
return func.lowerConstant(int_val, struct_obj.backing_int_ty);
return func.lowerConstant(int_val, backing_int_ty);
},
else => unreachable,
},
@@ -3269,6 +3277,7 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
const mod = func.bin_file.base.options.module.?;
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
.Int, .Enum => switch (ty.intInfo(mod).bits) {
@@ -3298,9 +3307,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
return WValue{ .imm32 = 0xaaaaaaaa };
},
.Struct => {
const struct_obj = mod.typeToStruct(ty).?;
assert(struct_obj.layout == .Packed);
return func.emitUndefined(struct_obj.backing_int_ty);
const packed_struct = mod.typeToPackedStruct(ty).?;
return func.emitUndefined(packed_struct.backingIntType(ip).toType());
},
else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}),
}
@@ -3340,7 +3348,7 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
.lazy_align => |ty| @as(i32, @bitCast(ty.toType().abiAlignment(mod))),
.lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiAlignment(mod).toByteUnits(0))))),
.lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(ty.toType().abiSize(mod))))),
};
}
@@ -3757,6 +3765,7 @@ fn structFieldPtr(
fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const ip = &mod.intern_pool;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -3769,9 +3778,9 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = switch (struct_ty.containerLayout(mod)) {
.Packed => switch (struct_ty.zigTypeTag(mod)) {
.Struct => result: {
const struct_obj = mod.typeToStruct(struct_ty).?;
const offset = struct_obj.packedFieldBitOffset(mod, field_index);
const backing_ty = struct_obj.backing_int_ty;
const packed_struct = mod.typeToPackedStruct(struct_ty).?;
const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = packed_struct.backingIntType(ip).toType();
const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse {
return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
};
@@ -3793,7 +3802,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const truncated = try func.trunc(shifted_value, int_type, backing_ty);
const bitcasted = try func.bitcast(field_ty, int_type, truncated);
break :result try bitcasted.toLocal(func, field_ty);
} else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) {
} else if (field_ty.isPtrAtRuntime(mod) and packed_struct.field_types.len == 1) {
// In this case we do not have to perform any transformations,
// we can simply reuse the operand.
break :result func.reuseOperand(struct_field.struct_operand, operand);
@@ -4053,7 +4062,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
.alignment = Type.anyerror.abiAlignment(mod),
.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
});
}
@@ -4141,7 +4150,10 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void
try func.emitWValue(err_union);
try func.addImm32(0);
const err_val_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
try func.addMemArg(.i32_store16, .{
.offset = err_union.offset() + err_val_offset,
.alignment = 2,
});
break :result err_union;
};
func.finishAir(inst, result, &.{ty_op.operand});
@@ -4977,7 +4989,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
elem_ty.abiAlignment(mod),
@intCast(elem_ty.abiAlignment(mod).toByteUnitsOptional().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
@@ -5065,7 +5077,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
std.wasm.simdOpcode(.i8x16_shuffle),
} ++ [1]u32{undefined} ** 4;
var lanes = std.mem.asBytes(operands[1..]);
var lanes = mem.asBytes(operands[1..]);
for (0..@as(usize, @intCast(mask_len))) |index| {
const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
const base_index = if (mask_elem >= 0)
@@ -5099,6 +5111,7 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const mod = func.bin_file.base.options.module.?;
const ip = &mod.intern_pool;
const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
const result_ty = func.typeOfIndex(inst);
const len = @as(usize, @intCast(result_ty.arrayLen(mod)));
@@ -5150,13 +5163,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (isByRef(result_ty, mod)) {
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
}
const struct_obj = mod.typeToStruct(result_ty).?;
const fields = struct_obj.fields.values();
const backing_type = struct_obj.backing_int_ty;
const packed_struct = mod.typeToPackedStruct(result_ty).?;
const field_types = packed_struct.field_types;
const backing_type = packed_struct.backingIntType(ip).toType();
// ensure the result is zero'd
const result = try func.allocLocal(backing_type);
if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
if (backing_type.bitSize(mod) <= 32)
try func.addImm32(0)
else
try func.addImm64(0);
@@ -5164,22 +5177,22 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var current_bit: u16 = 0;
for (elements, 0..) |elem, elem_index| {
const field = fields[elem_index];
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_ty = field_types.get(ip)[elem_index].toType();
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
const shift_val = if (backing_type.bitSize(mod) <= 32)
WValue{ .imm32 = current_bit }
else
WValue{ .imm64 = current_bit };
const value = try func.resolveInst(elem);
const value_bit_size = @as(u16, @intCast(field.ty.bitSize(mod)));
const value_bit_size: u16 = @intCast(field_ty.bitSize(mod));
const int_ty = try mod.intType(.unsigned, value_bit_size);
// load our current result on stack so we can perform all transformations
// using only stack values. Saving the cost of loads and stores.
try func.emitWValue(result);
const bitcasted = try func.bitcast(int_ty, field.ty, value);
const bitcasted = try func.bitcast(int_ty, field_ty, value);
const extended_val = try func.intcast(bitcasted, int_ty, backing_type);
// no need to shift any values when the current offset is 0
const shifted = if (current_bit != 0) shifted: {
@@ -5199,7 +5212,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_index, mod);
const elem_size = @as(u32, @intCast(elem_ty.abiSize(mod)));
const elem_size: u32 = @intCast(elem_ty.abiSize(mod));
const value = try func.resolveInst(elem);
try func.store(offset, value, elem_ty, 0);
@@ -5256,7 +5269,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (isByRef(union_ty, mod)) {
const result_ptr = try func.allocStack(union_ty);
const payload = try func.resolveInst(extra.init);
if (layout.tag_align >= layout.payload_align) {
if (layout.tag_align.compare(.gte, layout.payload_align)) {
if (isByRef(field_ty, mod)) {
const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
try func.store(payload_ptr, payload, field_ty, 0);
@@ -5420,9 +5433,9 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// when the tag alignment is smaller than the payload, the field will be stored
// after the payload.
const offset = if (layout.tag_align < layout.payload_align) blk: {
break :blk @as(u32, @intCast(layout.payload_size));
} else @as(u32, 0);
const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
break :blk @intCast(layout.payload_size);
} else 0;
try func.store(union_ptr, new_tag, tag_ty, offset);
func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
@@ -5439,9 +5452,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
// when the tag alignment is smaller than the payload, the field will be stored
// after the payload.
const offset = if (layout.tag_align < layout.payload_align) blk: {
break :blk @as(u32, @intCast(layout.payload_size));
} else @as(u32, 0);
const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
break :blk @intCast(layout.payload_size);
} else 0;
const tag = try func.load(operand, tag_ty, offset);
const result = try tag.toLocal(func, tag_ty);
func.finishAir(inst, result, &.{ty_op.operand});
@@ -6366,7 +6379,7 @@ fn lowerTry(
const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
.alignment = Type.anyerror.abiAlignment(mod),
.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
});
}
try func.addTag(.i32_eqz);
@@ -7287,7 +7300,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
.alignment = ty.abiAlignment(mod),
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
@@ -7349,7 +7362,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
.alignment = ty.abiAlignment(mod),
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7410,7 +7423,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
.alignment = ty.abiAlignment(mod),
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
},
);
const select_res = try func.allocLocal(ty);
@@ -7470,7 +7483,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
.alignment = ty.abiAlignment(mod),
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7566,7 +7579,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
.alignment = ty.abiAlignment(mod),
.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
});
} else {
try func.store(ptr, operand, ty, 0);
+17 -19
View File
@@ -28,20 +28,22 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
switch (ty.zigTypeTag(mod)) {
.Struct => {
if (ty.containerLayout(mod) == .Packed) {
const struct_type = mod.typeToStruct(ty).?;
if (struct_type.layout == .Packed) {
if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct };
}
// When the struct type is non-scalar
if (ty.structFieldCount(mod) > 1) return memory;
// When the struct's alignment is non-natural
const field = ty.structFields(mod).values()[0];
if (field.abi_align != .none) {
if (field.abi_align.toByteUnitsOptional().? > field.ty.abiAlignment(mod)) {
return memory;
}
if (struct_type.field_types.len > 1) {
// The struct type is non-scalar.
return memory;
}
return classifyType(field.ty, mod);
const field_ty = struct_type.field_types.get(ip)[0].toType();
const explicit_align = struct_type.fieldAlign(ip, 0);
if (explicit_align != .none) {
if (explicit_align.compareStrict(.gt, field_ty.abiAlignment(mod)))
return memory;
}
return classifyType(field_ty, mod);
},
.Int, .Enum, .ErrorSet, .Vector => {
const int_bits = ty.intInfo(mod).bits;
@@ -101,15 +103,11 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Struct => {
switch (ty.containerLayout(mod)) {
.Packed => {
const struct_obj = mod.typeToStruct(ty).?;
return scalarType(struct_obj.backing_int_ty, mod);
},
else => {
assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0, mod), mod);
},
if (mod.typeToPackedStruct(ty)) |packed_struct| {
return scalarType(packed_struct.backingIntType(ip).toType(), mod);
} else {
assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0, mod), mod);
}
},
.Union => {
+61 -58
View File
@@ -27,6 +27,7 @@ const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
const InternPool = @import("../../InternPool.zig");
const Alignment = InternPool.Alignment;
const Target = std.Target;
const Type = @import("../../type.zig").Type;
const TypedValue = @import("../../TypedValue.zig");
@@ -607,19 +608,21 @@ const InstTracking = struct {
const FrameAlloc = struct {
abi_size: u31,
abi_align: u5,
abi_align: Alignment,
ref_count: u16,
fn init(alloc_abi: struct { size: u64, alignment: u32 }) FrameAlloc {
assert(math.isPowerOfTwo(alloc_abi.alignment));
fn init(alloc_abi: struct { size: u64, alignment: Alignment }) FrameAlloc {
return .{
.abi_size = @intCast(alloc_abi.size),
.abi_align = math.log2_int(u32, alloc_abi.alignment),
.abi_align = alloc_abi.alignment,
.ref_count = 0,
};
}
fn initType(ty: Type, mod: *Module) FrameAlloc {
return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
return init(.{
.size = ty.abiSize(mod),
.alignment = ty.abiAlignment(mod),
});
}
};
@@ -702,12 +705,12 @@ pub fn generate(
@intFromEnum(FrameIndex.stack_frame),
FrameAlloc.init(.{
.size = 0,
.alignment = @intCast(func.analysis(ip).stack_alignment.toByteUnitsOptional() orelse 1),
.alignment = func.analysis(ip).stack_alignment.max(.@"1"),
}),
);
function.frame_allocs.set(
@intFromEnum(FrameIndex.call_frame),
FrameAlloc.init(.{ .size = 0, .alignment = 1 }),
FrameAlloc.init(.{ .size = 0, .alignment = .@"1" }),
);
const fn_info = mod.typeToFunc(fn_type).?;
@@ -729,15 +732,21 @@ pub fn generate(
function.ret_mcv = call_info.return_value;
function.frame_allocs.set(@intFromEnum(FrameIndex.ret_addr), FrameAlloc.init(.{
.size = Type.usize.abiSize(mod),
.alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align),
.alignment = Type.usize.abiAlignment(mod).min(call_info.stack_align),
}));
function.frame_allocs.set(@intFromEnum(FrameIndex.base_ptr), FrameAlloc.init(.{
.size = Type.usize.abiSize(mod),
.alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align),
.alignment = Alignment.min(
call_info.stack_align,
Alignment.fromNonzeroByteUnits(bin_file.options.target.stackAlignment()),
),
}));
function.frame_allocs.set(
@intFromEnum(FrameIndex.args_frame),
FrameAlloc.init(.{ .size = call_info.stack_byte_count, .alignment = call_info.stack_align }),
FrameAlloc.init(.{
.size = call_info.stack_byte_count,
.alignment = call_info.stack_align,
}),
);
function.gen() catch |err| switch (err) {
@@ -2156,8 +2165,8 @@ fn setFrameLoc(
) void {
const frame_i = @intFromEnum(frame_index);
if (aligned) {
const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i];
offset.* = mem.alignForward(i32, offset.*, alignment);
const alignment = self.frame_allocs.items(.abi_align)[frame_i];
offset.* = @intCast(alignment.forward(@intCast(offset.*)));
}
self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* });
offset.* += self.frame_allocs.items(.abi_size)[frame_i];
@@ -2179,7 +2188,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
const SortContext = struct {
frame_align: @TypeOf(frame_align),
pub fn lessThan(context: @This(), lhs: FrameIndex, rhs: FrameIndex) bool {
return context.frame_align[@intFromEnum(lhs)] > context.frame_align[@intFromEnum(rhs)];
return context.frame_align[@intFromEnum(lhs)].compare(.gt, context.frame_align[@intFromEnum(rhs)]);
}
};
const sort_context = SortContext{ .frame_align = frame_align };
@@ -2189,8 +2198,8 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
const call_frame_align = frame_align[@intFromEnum(FrameIndex.call_frame)];
const stack_frame_align = frame_align[@intFromEnum(FrameIndex.stack_frame)];
const args_frame_align = frame_align[@intFromEnum(FrameIndex.args_frame)];
const needed_align = @max(call_frame_align, stack_frame_align);
const need_align_stack = needed_align > args_frame_align;
const needed_align = call_frame_align.max(stack_frame_align);
const need_align_stack = needed_align.compare(.gt, args_frame_align);
// Create list of registers to save in the prologue.
// TODO handle register classes
@@ -2214,21 +2223,21 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true);
for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true);
rsp_offset += stack_frame_align_offset;
rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align);
rsp_offset = @intCast(needed_align.forward(@intCast(rsp_offset)));
rsp_offset -= stack_frame_align_offset;
frame_size[@intFromEnum(FrameIndex.call_frame)] =
@intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.stack_frame)]);
return .{
.stack_mask = @as(u32, math.maxInt(u32)) << (if (need_align_stack) needed_align else 0),
.stack_mask = @as(u32, math.maxInt(u32)) << @intCast(if (need_align_stack) @intFromEnum(needed_align) else 0),
.stack_adjust = @intCast(rsp_offset - frame_offset[@intFromEnum(FrameIndex.call_frame)]),
.save_reg_list = save_reg_list,
};
}
fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 {
const alloc_align = @as(u32, 1) << self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
return @min(alloc_align, @as(u32, @bitCast(frame_addr.off)) & (alloc_align - 1));
fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) Alignment {
const alloc_align = self.frame_allocs.get(@intFromEnum(frame_addr.index)).abi_align;
return @enumFromInt(@min(@intFromEnum(alloc_align), @ctz(frame_addr.off)));
}
fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 {
@@ -2241,13 +2250,13 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
const frame_align = frame_allocs_slice.items(.abi_align);
const stack_frame_align = &frame_align[@intFromEnum(FrameIndex.stack_frame)];
stack_frame_align.* = @max(stack_frame_align.*, alloc.abi_align);
stack_frame_align.* = stack_frame_align.max(alloc.abi_align);
for (self.free_frame_indices.keys(), 0..) |frame_index, free_i| {
const abi_size = frame_size[@intFromEnum(frame_index)];
if (abi_size != alloc.abi_size) continue;
const abi_align = &frame_align[@intFromEnum(frame_index)];
abi_align.* = @max(abi_align.*, alloc.abi_align);
abi_align.* = abi_align.max(alloc.abi_align);
_ = self.free_frame_indices.swapRemoveAt(free_i);
return frame_index;
@@ -2266,7 +2275,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
.size = math.cast(u32, val_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
},
.alignment = @max(ptr_ty.ptrAlignment(mod), 1),
.alignment = ptr_ty.ptrAlignment(mod).max(.@"1"),
}));
}
@@ -4266,7 +4275,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
};
defer if (tag_lock) |lock| self.register_manager.unlockReg(lock);
const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align < layout.payload_align) blk: {
const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) blk: {
// TODO reusing the operand
const reg = try self.copyToTmpRegister(ptr_union_ty, ptr);
try self.genBinOpMir(
@@ -4309,7 +4318,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
switch (operand) {
.load_frame => |frame_addr| {
if (tag_abi_size <= 8) {
const off: i32 = if (layout.tag_align < layout.payload_align)
const off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
0;
@@ -4321,7 +4330,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement get_union_tag for ABI larger than 8 bytes and operand {}", .{operand});
},
.register => {
const shift: u6 = if (layout.tag_align < layout.payload_align)
const shift: u6 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size * 8)
else
0;
@@ -5600,8 +5609,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const src_mcv = try self.resolveInst(operand);
const field_off: u32 = switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => @intCast(container_ty.structFieldOffset(index, mod) * 8),
.Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
struct_obj.packedFieldBitOffset(mod, index)
.Packed => if (mod.typeToStruct(container_ty)) |struct_type|
mod.structPackedFieldBitOffset(struct_type, index)
else
0,
};
@@ -8084,14 +8093,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// We need a properly aligned and sized call frame to be able to call this function.
{
const needed_call_frame =
FrameAlloc.init(.{ .size = info.stack_byte_count, .alignment = info.stack_align });
FrameAlloc.init(.{
.size = info.stack_byte_count,
.alignment = info.stack_align,
});
const frame_allocs_slice = self.frame_allocs.slice();
const stack_frame_size =
&frame_allocs_slice.items(.abi_size)[@intFromEnum(FrameIndex.call_frame)];
stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
const stack_frame_align =
&frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
stack_frame_align.* = @max(stack_frame_align.*, needed_call_frame.abi_align);
stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
}
try self.spillEflagsIfOccupied();
@@ -9944,7 +9956,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.indirect => try self.moveStrategy(ty, false),
.load_frame => |frame_addr| try self.moveStrategy(
ty,
self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod),
self.getFrameAddrAlignment(frame_addr).compare(.gte, ty.abiAlignment(mod)),
),
.lea_frame => .{ .move = .{ ._, .lea } },
else => unreachable,
@@ -9973,10 +9985,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.base = .{ .reg = .ds },
.disp = small_addr,
});
switch (try self.moveStrategy(ty, mem.isAlignedGeneric(
u32,
switch (try self.moveStrategy(ty, ty.abiAlignment(mod).check(
@as(u32, @bitCast(small_addr)),
ty.abiAlignment(mod),
))) {
.move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem),
.insert_extract => |ie| try self.asmRegisterMemoryImmediate(
@@ -10142,22 +10152,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
);
const src_alias = registerAlias(src_reg, abi_size);
switch (try self.moveStrategy(ty, switch (base) {
.none => mem.isAlignedGeneric(
u32,
@as(u32, @bitCast(disp)),
ty.abiAlignment(mod),
),
.none => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))),
.reg => |reg| switch (reg) {
.es, .cs, .ss, .ds => mem.isAlignedGeneric(
u32,
@as(u32, @bitCast(disp)),
ty.abiAlignment(mod),
),
.es, .cs, .ss, .ds => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))),
else => false,
},
.frame => |frame_index| self.getFrameAddrAlignment(
.{ .index = frame_index, .off = disp },
) >= ty.abiAlignment(mod),
).compare(.gte, ty.abiAlignment(mod)),
})) {
.move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias),
.insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate(
@@ -11079,7 +11081,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size);
const stack_frame_align =
&frame_allocs_slice.items(.abi_align)[@intFromEnum(FrameIndex.call_frame)];
stack_frame_align.* = @max(stack_frame_align.*, needed_call_frame.abi_align);
stack_frame_align.* = stack_frame_align.max(needed_call_frame.abi_align);
}
try self.spillEflagsIfOccupied();
@@ -11418,13 +11420,14 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const frame_index =
try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod));
if (result_ty.containerLayout(mod) == .Packed) {
const struct_obj = mod.typeToStruct(result_ty).?;
const struct_type = mod.typeToStruct(result_ty).?;
try self.genInlineMemset(
.{ .lea_frame = .{ .index = frame_index } },
.{ .immediate = 0 },
.{ .immediate = result_ty.abiSize(mod) },
);
for (elements, 0..) |elem, elem_i| {
for (elements, 0..) |elem, elem_i_usize| {
const elem_i: u32 = @intCast(elem_i_usize);
if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue;
const elem_ty = result_ty.structFieldType(elem_i, mod);
@@ -11437,7 +11440,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
}
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(mod));
const elem_abi_bits = elem_abi_size * 8;
const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i);
const elem_off = mod.structPackedFieldBitOffset(struct_type, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
@@ -11576,13 +11579,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
const tag_int = tag_int_val.toUnsignedInt(mod);
const tag_off: i32 = if (layout.tag_align < layout.payload_align)
const tag_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
0;
try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int });
const pl_off: i32 = if (layout.tag_align < layout.payload_align)
const pl_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
0
else
@intCast(layout.tag_size);
@@ -11823,7 +11826,7 @@ const CallMCValues = struct {
args: []MCValue,
return_value: InstTracking,
stack_byte_count: u31,
stack_align: u31,
stack_align: Alignment,
fn deinit(self: *CallMCValues, func: *Self) void {
func.gpa.free(self.args);
@@ -11867,12 +11870,12 @@ fn resolveCallingConventionValues(
.Naked => {
assert(result.args.len == 0);
result.return_value = InstTracking.init(.unreach);
result.stack_align = 8;
result.stack_align = .@"8";
},
.C => {
var param_reg_i: usize = 0;
var param_sse_reg_i: usize = 0;
result.stack_align = 16;
result.stack_align = .@"16";
switch (self.target.os.tag) {
.windows => {
@@ -11957,7 +11960,7 @@ fn resolveCallingConventionValues(
}
const param_size: u31 = @intCast(ty.abiSize(mod));
const param_align: u31 = @intCast(ty.abiAlignment(mod));
const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -11968,7 +11971,7 @@ fn resolveCallingConventionValues(
}
},
.Unspecified => {
result.stack_align = 16;
result.stack_align = .@"16";
// Return values
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
@@ -11997,7 +12000,7 @@ fn resolveCallingConventionValues(
continue;
}
const param_size: u31 = @intCast(ty.abiSize(mod));
const param_align: u31 = @intCast(ty.abiAlignment(mod));
const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -12010,7 +12013,7 @@ fn resolveCallingConventionValues(
else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}),
}
result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align);
result.stack_byte_count = @intCast(result.stack_align.forward(result.stack_byte_count));
return result;
}
+14 -24
View File
@@ -210,8 +210,9 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const struct_type = mod.typeToStruct(ty).?;
const ty_size = ty.abiSize(mod);
if (ty.containerLayout(mod) == .Packed) {
if (struct_type.layout == .Packed) {
assert(ty_size <= 128);
result[0] = .integer;
if (ty_size > 64) result[1] = .integer;
@@ -222,15 +223,13 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
var result_i: usize = 0; // out of 8
var byte_i: usize = 0; // out of 8
const fields = ty.structFields(mod);
for (fields.values()) |field| {
if (field.abi_align != .none) {
if (field.abi_align.toByteUnitsOptional().? < field.ty.abiAlignment(mod)) {
return memory_class;
}
}
const field_size = field.ty.abiSize(mod);
const field_class_array = classifySystemV(field.ty, mod, .other);
for (struct_type.field_types.get(ip), 0..) |field_ty_ip, i| {
const field_ty = field_ty_ip.toType();
const field_align = struct_type.fieldAlign(ip, i);
if (field_align != .none and field_align.compare(.lt, field_ty.abiAlignment(mod)))
return memory_class;
const field_size = field_ty.abiSize(mod);
const field_class_array = classifySystemV(field_ty, mod, .other);
const field_class = std.mem.sliceTo(&field_class_array, .none);
if (byte_i + field_size <= 8) {
// Combine this field with the previous one.
@@ -341,10 +340,11 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
return memory_class;
for (union_obj.field_types.get(ip), 0..) |field_ty, field_index| {
if (union_obj.fieldAlign(ip, @intCast(field_index)).toByteUnitsOptional()) |a| {
if (a < field_ty.toType().abiAlignment(mod)) {
return memory_class;
}
const field_align = union_obj.fieldAlign(ip, @intCast(field_index));
if (field_align != .none and
field_align.compare(.lt, field_ty.toType().abiAlignment(mod)))
{
return memory_class;
}
// Combine this field with the previous one.
const field_class = classifySystemV(field_ty.toType(), mod, .other);
@@ -533,13 +533,3 @@ const Register = @import("bits.zig").Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const Value = @import("../../value.zig").Value;
fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
return .{
.ty = Type.initTag(tag),
.default_val = Value.initTag(.unreachable_value),
.abi_align = 0,
.offset = offset,
.is_comptime = false,
};
}
+54 -47
View File
@@ -22,6 +22,7 @@ const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const Value = @import("value.zig").Value;
const Zir = @import("Zir.zig");
const Alignment = InternPool.Alignment;
pub const Result = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value ok.
@@ -116,7 +117,8 @@ pub fn generateLazySymbol(
bin_file: *link.File,
src_loc: Module.SrcLoc,
lazy_sym: link.File.LazySymbol,
alignment: *u32,
// TODO don't use an "out" parameter like this; put it in the result instead
alignment: *Alignment,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
@@ -141,7 +143,7 @@ pub fn generateLazySymbol(
}
if (lazy_sym.ty.isAnyError(mod)) {
alignment.* = 4;
alignment.* = .@"4";
const err_names = mod.global_error_set.keys();
mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(err_names.len)), endian);
var offset = code.items.len;
@@ -157,7 +159,7 @@ pub fn generateLazySymbol(
mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian);
return Result.ok;
} else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) {
alignment.* = 1;
alignment.* = .@"1";
for (lazy_sym.ty.enumFields(mod)) |tag_name_ip| {
const tag_name = mod.intern_pool.stringToSlice(tag_name_ip);
try code.ensureUnusedCapacity(tag_name.len + 1);
@@ -273,7 +275,7 @@ pub fn generateSymbol(
const abi_align = typed_value.ty.abiAlignment(mod);
// error value first when its type is larger than the error union's payload
if (error_align > payload_align) {
if (error_align.order(payload_align) == .gt) {
try code.writer().writeInt(u16, err_val, endian);
}
@@ -291,7 +293,7 @@ pub fn generateSymbol(
.fail => |em| return .{ .fail = em },
}
const unpadded_end = code.items.len - begin;
const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
@@ -300,11 +302,11 @@ pub fn generateSymbol(
}
// Payload size is larger than error set, so emit our error set last
if (error_align <= payload_align) {
if (error_align.compare(.lte, payload_align)) {
const begin = code.items.len;
try code.writer().writeInt(u16, err_val, endian);
const unpadded_end = code.items.len - begin;
const padded_end = mem.alignForward(u64, unpadded_end, abi_align);
const padded_end = abi_align.forward(unpadded_end);
const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
if (padding > 0) {
@@ -474,23 +476,18 @@ pub fn generateSymbol(
}
}
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
const fields = struct_obj.fields.values();
.struct_type => |struct_type| switch (struct_type.layout) {
.Packed => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
for (fields, 0..) |field, index| {
const field_ty = field.ty;
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.ty = field_ty,
.storage = .{ .u64 = bytes[index] },
} }),
.elems => |elems| elems[index],
@@ -499,48 +496,51 @@ pub fn generateSymbol(
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
if (field_ty.zigTypeTag(mod) == .Pointer) {
const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse
if (field_ty.toType().zigTypeTag(mod) == .Pointer) {
const field_size = math.cast(usize, field_ty.toType().abiSize(mod)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.ty = field_ty.toType(),
.val = field_val.toValue(),
}, &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
field_val.toValue().writeToPackedMemory(field_ty.toType(), mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @as(u16, @intCast(field_ty.bitSize(mod)));
bits += @as(u16, @intCast(field_ty.toType().bitSize(mod)));
}
} else {
},
.Auto, .Extern => {
const struct_begin = code.items.len;
const fields = struct_obj.fields.values();
const field_types = struct_type.field_types.get(ip);
const offsets = struct_type.offsets.get(ip);
var it = typed_value.ty.iterateStructOffsets(mod);
while (it.next()) |field_offset| {
const field_ty = fields[field_offset.field].ty;
if (!field_ty.hasRuntimeBits(mod)) continue;
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index];
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_offset.field] },
.ty = field_ty,
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_offset.field],
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const padding = math.cast(usize, field_offset.offset - (code.items.len - struct_begin)) orelse return error.Overflow;
const padding = math.cast(
usize,
offsets[field_index] - (code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.ty = field_ty.toType(),
.val = field_val.toValue(),
}, code, debug_output, reloc_info)) {
.ok => {},
@@ -548,9 +548,16 @@ pub fn generateSymbol(
}
}
const padding = math.cast(usize, std.mem.alignForward(u64, it.offset, @max(it.big_align, 1)) - (code.items.len - struct_begin)) orelse return error.Overflow;
const size = struct_type.size(ip).*;
const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
const padding = math.cast(
usize,
std.mem.alignForward(u64, size, @max(alignment, 1)) -
(code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
}
},
},
else => unreachable,
},
@@ -565,7 +572,7 @@ pub fn generateSymbol(
}
// Check if we should store the tag first.
if (layout.tag_size > 0 and layout.tag_align >= layout.payload_align) {
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = typed_value.ty.unionTagType(mod).?,
.val = un.tag.toValue(),
@@ -595,7 +602,7 @@ pub fn generateSymbol(
}
}
if (layout.tag_size > 0 and layout.tag_align < layout.payload_align) {
if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
switch (try generateSymbol(bin_file, src_loc, .{
.ty = union_obj.enum_tag_ty.toType(),
.val = un.tag.toValue(),
@@ -695,9 +702,9 @@ fn lowerParentPtr(
@intCast(field.index),
mod,
)),
.Packed => if (mod.typeToStruct(base_type.toType())) |struct_obj|
math.divExact(u16, struct_obj.packedFieldBitOffset(
mod,
.Packed => if (mod.typeToStruct(base_type.toType())) |struct_type|
math.divExact(u16, mod.structPackedFieldBitOffset(
struct_type,
@intCast(field.index),
), 8) catch |err| switch (err) {
error.UnexpectedRemainder => 0,
@@ -844,12 +851,12 @@ fn genDeclRef(
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (tv.ty.castPtrToFn(mod)) |fn_ty| {
if (mod.typeToFunc(fn_ty).?.is_generic) {
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod).toByteUnitsOptional().? });
}
} else if (tv.ty.zigTypeTag(mod) == .Pointer) {
const elem_ty = tv.ty.elemType2(mod);
if (!elem_ty.hasRuntimeBits(mod)) {
return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) });
return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod).toByteUnitsOptional().? });
}
}
@@ -1036,10 +1043,10 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
const payload_align = payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (payload_align.compare(.gte, error_align) or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return 0;
} else {
return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align);
return payload_align.forward(Type.anyerror.abiSize(mod));
}
}
@@ -1047,8 +1054,8 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
const payload_align = payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return mem.alignForward(u64, payload_ty.abiSize(mod), error_align);
if (payload_align.compare(.gte, error_align) and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return error_align.forward(payload_ty.abiSize(mod));
} else {
return 0;
}
+143 -145
View File
@@ -17,6 +17,7 @@ const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const InternPool = @import("../InternPool.zig");
const Alignment = InternPool.Alignment;
const BigIntLimb = std.math.big.Limb;
const BigInt = std.math.big.int;
@@ -292,7 +293,7 @@ pub const Function = struct {
const result: CValue = if (lowersToArray(ty, mod)) result: {
const writer = f.object.code_header.writer();
const alignment = 0;
const alignment: Alignment = .none;
const decl_c_value = try f.allocLocalValue(ty, alignment);
const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -318,25 +319,25 @@ pub const Function = struct {
/// Skips the reuse logic. This function should be used for any persistent allocation, i.e.
/// those which go into `allocs`. This function does not add the resulting local into `allocs`;
/// that responsibility lies with the caller.
fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue {
fn allocLocalValue(f: *Function, ty: Type, alignment: Alignment) !CValue {
const mod = f.object.dg.module;
const gpa = f.object.dg.gpa;
try f.locals.append(gpa, .{
.cty_idx = try f.typeToIndex(ty, .complete),
.alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
});
return .{ .new_local = @as(LocalIndex, @intCast(f.locals.items.len - 1)) };
return .{ .new_local = @intCast(f.locals.items.len - 1) };
}
fn allocLocal(f: *Function, inst: Air.Inst.Index, ty: Type) !CValue {
const result = try f.allocAlignedLocal(ty, .{}, 0);
const result = try f.allocAlignedLocal(ty, .{}, .none);
log.debug("%{d}: allocating t{d}", .{ inst, result.new_local });
return result;
}
/// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
/// not be used for persistent locals (i.e. those in `allocs`).
fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue {
fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: Alignment) !CValue {
const mod = f.object.dg.module;
if (f.free_locals_map.getPtr(.{
.cty_idx = try f.typeToIndex(ty, .complete),
@@ -1299,139 +1300,134 @@ pub const DeclGen = struct {
}
try writer.writeByte('}');
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
switch (struct_obj.layout) {
.Auto, .Extern => {
if (!location.isInitializer()) {
.struct_type => |struct_type| switch (struct_type.layout) {
.Auto, .Extern => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
try writer.writeByte('{');
var empty = true;
for (0..struct_type.field_types.len) |field_i| {
const field_ty = struct_type.field_types.get(ip)[field_i].toType();
if (struct_type.fieldIsComptime(ip, field_i)) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(',');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_i] },
} }),
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
try dg.renderValue(writer, field_ty, field_val.toValue(), initializer_type);
empty = false;
}
try writer.writeByte('}');
},
.Packed => {
const int_info = ty.intInfo(mod);
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
const bit_offset_ty = try mod.intType(.unsigned, bits);
const field_types = struct_type.field_types.get(ip);
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
for (field_types) |field_ty| {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
if (eff_num_fields == 0) {
try writer.writeByte('(');
try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
} else if (ty.bitSize(mod) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
try writer.writeAll("zig_or_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
try writer.writeByte('{');
var empty = true;
for (struct_obj.fields.values(), 0..) |field, field_i| {
if (field.is_comptime) continue;
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
var eff_index: usize = 0;
var needs_closing_paren = false;
for (field_types, 0..) |field_ty, field_i| {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(',');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field.ty.toIntern(),
.ty = field_ty,
.storage = .{ .u64 = bytes[field_i] },
} }),
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
try dg.renderValue(writer, field.ty, field_val.toValue(), initializer_type);
empty = false;
}
try writer.writeByte('}');
},
.Packed => {
const int_info = ty.intInfo(mod);
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
const bit_offset_ty = try mod.intType(.unsigned, bits);
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
for (struct_obj.fields.values()) |field| {
if (field.is_comptime) continue;
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
if (eff_num_fields == 0) {
try writer.writeByte('(');
try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
} else if (ty.bitSize(mod) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
try writer.writeAll("zig_or_");
const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
if (bit_offset != 0) {
try writer.writeAll("zig_shl_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
}
var eff_index: usize = 0;
var needs_closing_paren = false;
for (struct_obj.fields.values(), 0..) |field, field_i| {
if (field.is_comptime) continue;
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field.ty.toIntern(),
.storage = .{ .u64 = bytes[field_i] },
} }),
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } };
if (bit_offset != 0) {
try writer.writeAll("zig_shl_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
try writer.writeAll(", ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
} else {
try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument);
}
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
bit_offset += field.ty.bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
} else {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
for (struct_obj.fields.values(), 0..) |field, field_i| {
if (field.is_comptime) continue;
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
try dg.renderType(writer, ty);
try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
try writer.writeAll(", ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field.ty.toIntern(),
.storage = .{ .u64 = bytes[field_i] },
} }),
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
if (bit_offset != 0) {
try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
try writer.writeAll(" << ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
} else {
try dg.renderValue(writer, field.ty, field_val.toValue(), .Other);
}
bit_offset += field.ty.bitSize(mod);
empty = false;
} else {
try dg.renderIntCast(writer, ty, cast_context, field_ty.toType(), .FunctionArgument);
}
try writer.writeByte(')');
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
bit_offset += field_ty.toType().bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
},
}
} else {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
for (field_types, 0..) |field_ty, field_i| {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[field_i] },
} }),
.elems => |elems| elems[field_i],
.repeated_elem => |elem| elem,
};
if (bit_offset != 0) {
try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
try writer.writeAll(" << ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
} else {
try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), .Other);
}
bit_offset += field_ty.toType().bitSize(mod);
empty = false;
}
try writer.writeByte(')');
}
},
},
else => unreachable,
},
@@ -1723,7 +1719,7 @@ pub const DeclGen = struct {
ty: Type,
name: CValue,
qualifiers: CQualifiers,
alignment: u64,
alignment: Alignment,
kind: CType.Kind,
) error{ OutOfMemory, AnalysisFail }!void {
const mod = dg.module;
@@ -1854,7 +1850,7 @@ pub const DeclGen = struct {
decl.ty,
.{ .decl = decl_index },
CQualifiers.init(.{ .@"const" = variable.is_const }),
@as(u32, @intCast(decl.alignment.toByteUnits(0))),
decl.alignment,
.complete,
);
try fwd_decl_writer.writeAll(";\n");
@@ -2460,7 +2456,7 @@ pub fn genErrDecls(o: *Object) !void {
} });
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, 0, .complete);
try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, .none, .complete);
try writer.writeAll(" = ");
try o.dg.renderValue(writer, name_ty, name_val.toValue(), .StaticInitializer);
try writer.writeAll(";\n");
@@ -2472,7 +2468,7 @@ pub fn genErrDecls(o: *Object) !void {
});
try writer.writeAll("static ");
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete);
try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, .none, .complete);
try writer.writeAll(" = {");
for (mod.global_error_set.keys(), 0..) |name_nts, value| {
const name = mod.intern_pool.stringToSlice(name_nts);
@@ -2523,7 +2519,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try w.writeByte(' ');
try w.writeAll(fn_name);
try w.writeByte('(');
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete);
try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, .none, .complete);
try w.writeAll(") {\n switch (tag) {\n");
for (enum_ty.enumFields(mod), 0..) |name_ip, index_usize| {
const index = @as(u32, @intCast(index_usize));
@@ -2546,7 +2542,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
try w.print(" case {}: {{\n static ", .{
try o.dg.fmtIntLiteral(enum_ty, int_val, .Other),
});
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete);
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete);
try w.writeAll(" = ");
try o.dg.renderValue(w, name_ty, name_val.toValue(), .Initializer);
try w.writeAll(";\n return (");
@@ -2706,7 +2702,7 @@ pub fn genDecl(o: *Object) !void {
if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
try w.print("zig_linksection(\"{s}\", ", .{s});
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment.toByteUnits(0), .complete);
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.alignment, .complete);
if (decl.@"linksection" != .none) try w.writeAll(", read, write)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer);
@@ -2717,14 +2713,14 @@ pub fn genDecl(o: *Object) !void {
const fwd_decl_writer = o.dg.fwd_decl.writer();
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, Const, decl.alignment, .complete);
try fwd_decl_writer.writeAll(";\n");
const w = o.writer();
if (!is_global) try w.writeAll("static ");
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
try w.print("zig_linksection(\"{s}\", ", .{s});
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.alignment.toByteUnits(0), .complete);
try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.alignment, .complete);
if (decl.@"linksection" != .none) try w.writeAll(", read)");
try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
@@ -3353,8 +3349,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
try reap(f, inst, &.{ty_op.operand});
const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
alignment >= src_ty.abiAlignment(mod)
const is_aligned = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
else
true;
const is_array = lowersToArray(src_ty, mod);
@@ -3625,8 +3621,8 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
return .none;
}
const is_aligned = if (ptr_info.flags.alignment.toByteUnitsOptional()) |alignment|
alignment >= src_ty.abiAlignment(mod)
const is_aligned = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(mod))
else
true;
const is_array = lowersToArray(ptr_info.child.toType(), mod);
@@ -4847,7 +4843,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (is_reg) {
const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod);
try writer.writeAll("register ");
const alignment = 0;
const alignment: Alignment = .none;
const local_value = try f.allocLocalValue(output_ty, alignment);
try f.allocs.put(gpa, local_value.new_local, false);
try f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, alignment, .complete);
@@ -4880,7 +4876,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (asmInputNeedsLocal(f, constraint, input_val)) {
const input_ty = f.typeOf(input);
if (is_reg) try writer.writeAll("register ");
const alignment = 0;
const alignment: Alignment = .none;
const local_value = try f.allocLocalValue(input_ty, alignment);
try f.allocs.put(gpa, local_value.new_local, false);
try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, alignment, .complete);
@@ -5230,7 +5226,8 @@ fn fieldLocation(
const container_ty = container_ptr_ty.childType(mod);
return switch (container_ty.zigTypeTag(mod)) {
.Struct => switch (container_ty.containerLayout(mod)) {
.Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| {
.Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index_usize| {
const next_field_index: u32 = @intCast(next_field_index_usize);
if (container_ty.structFieldIsComptime(next_field_index, mod)) continue;
const field_ty = container_ty.structFieldType(next_field_index, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -5238,7 +5235,7 @@ fn fieldLocation(
break .{ .field = if (container_ty.isSimpleTuple(mod))
.{ .field = next_field_index }
else
.{ .identifier = ip.stringToSlice(container_ty.structFieldName(next_field_index, mod)) } };
.{ .identifier = ip.stringToSlice(container_ty.legacyStructFieldName(next_field_index, mod)) } };
} else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
.Packed => if (field_ptr_ty.ptrInfo(mod).packed_offset.host_size == 0)
.{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) + @divExact(container_ptr_ty.ptrInfo(mod).packed_offset.bit_offset, 8) }
@@ -5425,14 +5422,14 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
.Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
.{ .field = extra.field_index }
else
.{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) },
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
.Packed => {
const struct_obj = mod.typeToStruct(struct_ty).?;
const struct_type = mod.typeToStruct(struct_ty).?;
const int_info = struct_ty.intInfo(mod);
const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index);
const bit_offset = mod.structPackedFieldBitOffset(struct_type, extra.field_index);
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
const field_int_signedness = if (inst_ty.isAbiInt(mod))
@@ -5487,7 +5484,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
.anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0)
.{ .field = extra.field_index }
else
.{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) },
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
.union_type => |union_type| field_name: {
const union_obj = ip.loadUnionType(union_type);
@@ -6820,7 +6817,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
}
},
.Struct => switch (inst_ty.containerLayout(mod)) {
.Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
.Auto, .Extern => for (resolved_elements, 0..) |element, field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
if (inst_ty.structFieldIsComptime(field_i, mod)) continue;
const field_ty = inst_ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -6829,7 +6827,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod))
.{ .field = field_i }
else
.{ .identifier = ip.stringToSlice(inst_ty.structFieldName(field_i, mod)) });
.{ .identifier = ip.stringToSlice(inst_ty.legacyStructFieldName(field_i, mod)) });
try a.assign(f, writer);
try f.writeCValue(writer, element, .Other);
try a.end(f, writer);
+24 -15
View File
@@ -283,14 +283,20 @@ pub const CType = extern union {
@"align": Alignment,
abi: Alignment,
pub fn init(alignment: u64, abi_alignment: u32) AlignAs {
const @"align" = Alignment.fromByteUnits(alignment);
const abi_align = Alignment.fromNonzeroByteUnits(abi_alignment);
pub fn init(@"align": Alignment, abi_align: Alignment) AlignAs {
assert(abi_align != .none);
return .{
.@"align" = if (@"align" != .none) @"align" else abi_align,
.abi = abi_align,
};
}
pub fn initByteUnits(alignment: u64, abi_alignment: u32) AlignAs {
return init(
Alignment.fromByteUnits(alignment),
Alignment.fromNonzeroByteUnits(abi_alignment),
);
}
pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
const abi_align = ty.abiAlignment(mod);
return init(abi_align, abi_align);
@@ -1360,6 +1366,7 @@ pub const CType = extern union {
pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
const mod = lookup.getModule();
const ip = &mod.intern_pool;
self.* = undefined;
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
@@ -1382,12 +1389,12 @@ pub const CType = extern union {
.array => switch (kind) {
.forward, .complete, .global => {
const abi_size = ty.abiSize(mod);
const abi_align = ty.abiAlignment(mod);
const abi_align = ty.abiAlignment(mod).toByteUnits(0);
self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
.len = @divExact(abi_size, abi_align),
.elem_type = tagFromIntInfo(.{
.signedness = .unsigned,
.bits = @as(u16, @intCast(abi_align * 8)),
.bits = @intCast(abi_align * 8),
}).toIndex(),
} } };
self.value = .{ .cty = initPayload(&self.storage.seq) };
@@ -1488,10 +1495,10 @@ pub const CType = extern union {
},
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
if (mod.typeToStruct(ty)) |struct_obj| {
try self.initType(struct_obj.backing_int_ty, kind, lookup);
if (mod.typeToPackedStruct(ty)) |packed_struct| {
try self.initType(packed_struct.backingIntType(ip).toType(), kind, lookup);
} else {
const bits = @as(u16, @intCast(ty.bitSize(mod)));
const bits: u16 = @intCast(ty.bitSize(mod));
const int_ty = try mod.intType(.unsigned, bits);
try self.initType(int_ty, kind, lookup);
}
@@ -1722,7 +1729,6 @@ pub const CType = extern union {
.Fn => {
const info = mod.typeToFunc(ty).?;
const ip = &mod.intern_pool;
if (!info.is_generic) {
if (lookup.isMutable()) {
const param_kind: Kind = switch (kind) {
@@ -1947,7 +1953,8 @@ pub const CType = extern union {
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
var c_field_i: usize = 0;
for (0..fields_len) |field_i| {
for (0..fields_len) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -1958,7 +1965,7 @@ pub const CType = extern union {
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else
arena.dupeZ(u8, ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
else => unreachable,
})),
@@ -2091,7 +2098,8 @@ pub const CType = extern union {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
else => unreachable,
}) |field_i| {
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -2110,7 +2118,7 @@ pub const CType = extern union {
std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable
else
ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
else => unreachable,
}),
@@ -2219,7 +2227,8 @@ pub const CType = extern union {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
else => unreachable,
}) |field_i| {
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
@@ -2234,7 +2243,7 @@ pub const CType = extern union {
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else
mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
else => unreachable,
}));
+341 -295
View File
@@ -833,7 +833,10 @@ pub const Object = struct {
/// When an LLVM struct type is created, an entry is inserted into this
/// table for every zig source field of the struct that has a corresponding
/// LLVM struct field. comptime fields and 0 bit fields are not included.
/// LLVM struct field. comptime fields are not included. Zero-bit fields are
/// mapped to a field at the correct byte, which may be a padding field, or
/// are not mapped, in which case they are sematically at the end of the
/// struct.
/// The value is the LLVM struct field index.
/// This is denormalized data.
struct_field_map: std.AutoHashMapUnmanaged(ZigStructField, c_uint),
@@ -1076,7 +1079,7 @@ pub const Object = struct {
table_variable_index.setMutability(.constant, &o.builder);
table_variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
table_variable_index.setAlignment(
Builder.Alignment.fromByteUnits(slice_ty.abiAlignment(mod)),
slice_ty.abiAlignment(mod).toLlvm(),
&o.builder,
);
@@ -1318,8 +1321,9 @@ pub const Object = struct {
_ = try attributes.removeFnAttr(.@"noinline");
}
if (func.analysis(ip).stack_alignment.toByteUnitsOptional()) |alignment| {
try attributes.addFnAttr(.{ .alignstack = Builder.Alignment.fromByteUnits(alignment) }, &o.builder);
const stack_alignment = func.analysis(ip).stack_alignment;
if (stack_alignment != .none) {
try attributes.addFnAttr(.{ .alignstack = stack_alignment.toLlvm() }, &o.builder);
try attributes.addFnAttr(.@"noinline", &o.builder);
} else {
_ = try attributes.removeFnAttr(.alignstack);
@@ -1407,7 +1411,7 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
if (isByRef(param_ty, mod)) {
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const param_llvm_ty = param.typeOfWip(&wip);
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1423,7 +1427,7 @@ pub const Object = struct {
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
try o.addByRefParamAttrs(&attributes, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
llvm_arg_i += 1;
@@ -1438,7 +1442,7 @@ pub const Object = struct {
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param = wip.arg(llvm_arg_i);
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .noundef, &o.builder);
llvm_arg_i += 1;
@@ -1456,7 +1460,7 @@ pub const Object = struct {
llvm_arg_i += 1;
const param_llvm_ty = try o.lowerType(param_ty);
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1481,10 +1485,10 @@ pub const Object = struct {
if (ptr_info.flags.is_const) {
try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
}
const elem_align = Builder.Alignment.fromByteUnits(
ptr_info.flags.alignment.toByteUnitsOptional() orelse
@max(ptr_info.child.toType().abiAlignment(mod), 1),
);
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
ptr_info.child.toType().abiAlignment(mod).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
const ptr_param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
@@ -1501,7 +1505,7 @@ pub const Object = struct {
const field_types = it.types_buffer[0..it.types_len];
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const param_alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const param_alignment = param_ty.abiAlignment(mod).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, param_alignment, target);
const llvm_ty = try o.builder.structType(.normal, field_types);
for (0..field_types.len) |field_i| {
@@ -1531,7 +1535,7 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1546,7 +1550,7 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const arg_ptr = try buildAllocaInner(&wip, false, param_llvm_ty, alignment, target);
_ = try wip.store(.normal, param, arg_ptr, alignment);
@@ -1967,7 +1971,7 @@ pub const Object = struct {
di_file,
owner_decl.src_node + 1,
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
enumerators.ptr,
@intCast(enumerators.len),
try o.lowerDebugType(int_ty, .full),
@@ -2055,7 +2059,7 @@ pub const Object = struct {
var offset: u64 = 0;
offset += ptr_size;
offset = std.mem.alignForward(u64, offset, len_align);
offset = len_align.forward(offset);
const len_offset = offset;
const fields: [2]*llvm.DIType = .{
@@ -2065,7 +2069,7 @@ pub const Object = struct {
di_file,
line,
ptr_size * 8, // size in bits
ptr_align * 8, // align in bits
ptr_align.toByteUnits(0) * 8, // align in bits
0, // offset in bits
0, // flags
try o.lowerDebugType(ptr_ty, .full),
@@ -2076,7 +2080,7 @@ pub const Object = struct {
di_file,
line,
len_size * 8, // size in bits
len_align * 8, // align in bits
len_align.toByteUnits(0) * 8, // align in bits
len_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(len_ty, .full),
@@ -2089,7 +2093,7 @@ pub const Object = struct {
di_file,
line,
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -2110,7 +2114,7 @@ pub const Object = struct {
const ptr_di_ty = dib.createPointerType(
elem_di_ty,
target.ptrBitWidth(),
ty.ptrAlignment(mod) * 8,
ty.ptrAlignment(mod).toByteUnits(0) * 8,
name,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -2142,7 +2146,7 @@ pub const Object = struct {
.Array => {
const array_di_ty = dib.createArrayType(
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.lowerDebugType(ty.childType(mod), .full),
@intCast(ty.arrayLen(mod)),
);
@@ -2174,7 +2178,7 @@ pub const Object = struct {
const vector_di_ty = dib.createVectorType(
ty.abiSize(mod) * 8,
ty.abiAlignment(mod) * 8,
@intCast(ty.abiAlignment(mod).toByteUnits(0) * 8),
elem_di_type,
ty.vectorLen(mod),
);
@@ -2223,7 +2227,7 @@ pub const Object = struct {
var offset: u64 = 0;
offset += payload_size;
offset = std.mem.alignForward(u64, offset, non_null_align);
offset = non_null_align.forward(offset);
const non_null_offset = offset;
const fields: [2]*llvm.DIType = .{
@@ -2233,7 +2237,7 @@ pub const Object = struct {
di_file,
line,
payload_size * 8, // size in bits
payload_align * 8, // align in bits
payload_align.toByteUnits(0) * 8, // align in bits
0, // offset in bits
0, // flags
try o.lowerDebugType(child_ty, .full),
@@ -2244,7 +2248,7 @@ pub const Object = struct {
di_file,
line,
non_null_size * 8, // size in bits
non_null_align * 8, // align in bits
non_null_align.toByteUnits(0) * 8, // align in bits
non_null_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(non_null_ty, .full),
@@ -2257,7 +2261,7 @@ pub const Object = struct {
di_file,
line,
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -2306,16 +2310,16 @@ pub const Object = struct {
var payload_index: u32 = undefined;
var error_offset: u64 = undefined;
var payload_offset: u64 = undefined;
if (error_align > payload_align) {
if (error_align.compare(.gt, payload_align)) {
error_index = 0;
payload_index = 1;
error_offset = 0;
payload_offset = std.mem.alignForward(u64, error_size, payload_align);
payload_offset = payload_align.forward(error_size);
} else {
payload_index = 0;
error_index = 1;
payload_offset = 0;
error_offset = std.mem.alignForward(u64, payload_size, error_align);
error_offset = error_align.forward(payload_size);
}
var fields: [2]*llvm.DIType = undefined;
@@ -2325,7 +2329,7 @@ pub const Object = struct {
di_file,
line,
error_size * 8, // size in bits
error_align * 8, // align in bits
error_align.toByteUnits(0) * 8, // align in bits
error_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(Type.anyerror, .full),
@@ -2336,7 +2340,7 @@ pub const Object = struct {
di_file,
line,
payload_size * 8, // size in bits
payload_align * 8, // align in bits
payload_align.toByteUnits(0) * 8, // align in bits
payload_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(payload_ty, .full),
@@ -2348,7 +2352,7 @@ pub const Object = struct {
di_file,
line,
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&fields,
@@ -2374,10 +2378,10 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
if (mod.typeToStruct(ty)) |struct_obj| {
if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
assert(struct_obj.haveLayout());
const info = struct_obj.backing_int_ty.intInfo(mod);
if (mod.typeToPackedStruct(ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntType(ip).*;
if (backing_int_ty != .none) {
const info = backing_int_ty.toType().intInfo(mod);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
.unsigned => DW.ATE.unsigned,
@@ -2417,7 +2421,7 @@ pub const Object = struct {
const field_size = field_ty.toType().abiSize(mod);
const field_align = field_ty.toType().abiAlignment(mod);
const field_offset = std.mem.alignForward(u64, offset, field_align);
const field_offset = field_align.forward(offset);
offset = field_offset + field_size;
const field_name = if (tuple.names.len != 0)
@@ -2432,7 +2436,7 @@ pub const Object = struct {
null, // file
0, // line
field_size * 8, // size in bits
field_align * 8, // align in bits
field_align.toByteUnits(0) * 8, // align in bits
field_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(field_ty.toType(), .full),
@@ -2445,7 +2449,7 @@ pub const Object = struct {
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@@ -2459,10 +2463,8 @@ pub const Object = struct {
try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
return full_di_ty;
},
.struct_type => |struct_type| s: {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
if (!struct_obj.haveFieldTypes()) {
.struct_type => |struct_type| {
if (!struct_type.haveFieldTypes(ip)) {
// This can happen if a struct type makes it all the way to
// flush() without ever being instantiated or referenced (even
// via pointer). The only reason we are hearing about it now is
@@ -2492,37 +2494,41 @@ pub const Object = struct {
return struct_di_ty;
}
const fields = ty.structFields(mod);
const layout = ty.containerLayout(mod);
const struct_type = mod.typeToStruct(ty).?;
var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{};
defer di_fields.deinit(gpa);
try di_fields.ensureUnusedCapacity(gpa, fields.count());
try di_fields.ensureUnusedCapacity(gpa, struct_type.field_types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_size = field_ty.abiSize(mod);
const field_align = mod.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
);
const field_offset = ty.structFieldOffset(field_index, mod);
var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
const field_size = field.ty.abiSize(mod);
const field_align = field.alignment(mod, layout);
const field_offset = std.mem.alignForward(u64, offset, field_align);
offset = field_offset + field_size;
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(gpa, "{d}", .{field_index});
const field_name = ip.stringToSlice(fields.keys()[field_and_index.index]);
const field_di_ty = try o.lowerDebugType(field_ty, .full);
try di_fields.append(gpa, dib.createMemberType(
fwd_decl.toScope(),
field_name,
ip.stringToSlice(field_name),
null, // file
0, // line
field_size * 8, // size in bits
field_align * 8, // align in bits
field_align.toByteUnits(0) * 8, // align in bits
field_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(field.ty, .full),
field_di_ty,
));
}
@@ -2532,7 +2538,7 @@ pub const Object = struct {
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
di_fields.items.ptr,
@@ -2588,7 +2594,7 @@ pub const Object = struct {
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&di_fields,
@@ -2624,7 +2630,7 @@ pub const Object = struct {
null, // file
0, // line
field_size * 8, // size in bits
field_align * 8, // align in bits
field_align.toByteUnits(0) * 8, // align in bits
0, // offset in bits
0, // flags
field_di_ty,
@@ -2644,7 +2650,7 @@ pub const Object = struct {
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
di_fields.items.ptr,
@intCast(di_fields.items.len),
@@ -2661,12 +2667,12 @@ pub const Object = struct {
var tag_offset: u64 = undefined;
var payload_offset: u64 = undefined;
if (layout.tag_align >= layout.payload_align) {
if (layout.tag_align.compare(.gte, layout.payload_align)) {
tag_offset = 0;
payload_offset = std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
payload_offset = layout.payload_align.forward(layout.tag_size);
} else {
payload_offset = 0;
tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align);
tag_offset = layout.tag_align.forward(layout.payload_size);
}
const tag_di = dib.createMemberType(
@@ -2675,7 +2681,7 @@ pub const Object = struct {
null, // file
0, // line
layout.tag_size * 8,
layout.tag_align * 8, // align in bits
layout.tag_align.toByteUnits(0) * 8,
tag_offset * 8, // offset in bits
0, // flags
try o.lowerDebugType(union_obj.enum_tag_ty.toType(), .full),
@@ -2687,14 +2693,14 @@ pub const Object = struct {
null, // file
0, // line
layout.payload_size * 8, // size in bits
layout.payload_align * 8, // align in bits
layout.payload_align.toByteUnits(0) * 8,
payload_offset * 8, // offset in bits
0, // flags
union_di_ty,
);
const full_di_fields: [2]*llvm.DIType =
if (layout.tag_align >= layout.payload_align)
if (layout.tag_align.compare(.gte, layout.payload_align))
.{ tag_di, payload_di }
else
.{ payload_di, tag_di };
@@ -2705,7 +2711,7 @@ pub const Object = struct {
null, // file
0, // line
ty.abiSize(mod) * 8, // size in bits
ty.abiAlignment(mod) * 8, // align in bits
ty.abiAlignment(mod).toByteUnits(0) * 8, // align in bits
0, // flags
null, // derived from
&full_di_fields,
@@ -2925,8 +2931,8 @@ pub const Object = struct {
else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder),
}
if (fn_info.alignment.toByteUnitsOptional()) |alignment|
function_index.setAlignment(Builder.Alignment.fromByteUnits(alignment), &o.builder);
if (fn_info.alignment != .none)
function_index.setAlignment(fn_info.alignment.toLlvm(), &o.builder);
// Function attributes that are independent of analysis results of the function body.
try o.addCommonFnAttributes(&attributes);
@@ -2949,9 +2955,8 @@ pub const Object = struct {
.byref => {
const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1];
const param_llvm_ty = try o.lowerType(param_ty.toType());
const alignment =
Builder.Alignment.fromByteUnits(param_ty.toType().abiAlignment(mod));
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
const alignment = param_ty.toType().abiAlignment(mod);
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment.toLlvm(), it.byval_attr, param_llvm_ty);
},
.byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
// No attributes needed for these.
@@ -3248,21 +3253,21 @@ pub const Object = struct {
var fields: [3]Builder.Type = undefined;
var fields_len: usize = 2;
const padding_len = if (error_align > payload_align) pad: {
const padding_len = if (error_align.compare(.gt, payload_align)) pad: {
fields[0] = error_type;
fields[1] = payload_type;
const payload_end =
std.mem.alignForward(u64, error_size, payload_align) +
payload_align.forward(error_size) +
payload_size;
const abi_size = std.mem.alignForward(u64, payload_end, error_align);
const abi_size = error_align.forward(payload_end);
break :pad abi_size - payload_end;
} else pad: {
fields[0] = payload_type;
fields[1] = error_type;
const error_end =
std.mem.alignForward(u64, payload_size, error_align) +
error_align.forward(payload_size) +
error_size;
const abi_size = std.mem.alignForward(u64, error_end, payload_align);
const abi_size = payload_align.forward(error_end);
break :pad abi_size - error_end;
};
if (padding_len > 0) {
@@ -3276,60 +3281,74 @@ pub const Object = struct {
const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) {
assert(struct_obj.haveLayout());
const int_ty = try o.lowerType(struct_obj.backing_int_ty);
if (struct_type.layout == .Packed) {
const int_ty = try o.lowerType(struct_type.backingIntType(ip).toType());
gop.value_ptr.* = int_ty;
return int_ty;
}
const name = try o.builder.string(ip.stringToSlice(
try struct_obj.getFullyQualifiedName(mod),
try mod.declPtr(struct_type.decl.unwrap().?).getFullyQualifiedName(mod),
));
const ty = try o.builder.opaqueType(name);
gop.value_ptr.* = ty; // must be done before any recursive calls
assert(struct_obj.haveFieldTypes());
var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_field_types.deinit(o.gpa);
// Although we can estimate how much capacity to add, these cannot be
// relied upon because of the recursive calls to lowerType below.
try llvm_field_types.ensureUnusedCapacity(o.gpa, struct_obj.fields.count());
try o.struct_field_map.ensureUnusedCapacity(o.gpa, @intCast(struct_obj.fields.count()));
try llvm_field_types.ensureUnusedCapacity(o.gpa, struct_type.field_types.len);
try o.struct_field_map.ensureUnusedCapacity(o.gpa, struct_type.field_types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 1;
var big_align: InternPool.Alignment = .@"1";
var struct_kind: Builder.Type.Structure.Kind = .normal;
var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
const field_align = field.alignment(mod, struct_obj.layout);
const field_ty_align = field.ty.abiAlignment(mod);
if (field_align < field_ty_align) struct_kind = .@"packed";
big_align = @max(big_align, field_align);
// When we encounter a zero-bit field, we place it here so we know to map it to the next non-zero-bit field (if any).
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
const field_align = mod.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
);
const field_ty_align = field_ty.abiAlignment(mod);
if (field_align.compare(.lt, field_ty_align)) struct_kind = .@"packed";
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, field_align);
offset = field_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) try llvm_field_types.append(
o.gpa,
try o.builder.arrayType(padding_len, .i8),
);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
if (offset != struct_type.size(ip).*) {
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = field_index,
}, @intCast(llvm_field_types.items.len));
}
continue;
}
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = field_and_index.index,
.field_index = field_index,
}, @intCast(llvm_field_types.items.len));
try llvm_field_types.append(o.gpa, try o.lowerType(field.ty));
try llvm_field_types.append(o.gpa, try o.lowerType(field_ty));
offset += field.ty.abiSize(mod);
offset += field_ty.abiSize(mod);
}
{
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, big_align);
offset = big_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) try llvm_field_types.append(
o.gpa,
@@ -3353,25 +3372,39 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
var big_align: u32 = 0;
var big_align: InternPool.Alignment = .none;
const struct_size = t.abiSize(mod);
for (
anon_struct_type.types.get(ip),
anon_struct_type.values.get(ip),
0..,
) |field_ty, field_val, field_index| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue;
if (field_val != .none) continue;
const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, field_align);
offset = field_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) try llvm_field_types.append(
o.gpa,
try o.builder.arrayType(padding_len, .i8),
);
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field. If there are runtime bits after this field,
// map to the next LLVM field (which we know exists): otherwise, don't
// map the field, indicating it's at the end of the struct.
if (offset != struct_size) {
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = @intCast(field_index),
}, @intCast(llvm_field_types.items.len));
}
continue;
}
try o.struct_field_map.put(o.gpa, .{
.struct_ty = t.toIntern(),
.field_index = @intCast(field_index),
@@ -3382,7 +3415,7 @@ pub const Object = struct {
}
{
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, big_align);
offset = big_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) try llvm_field_types.append(
o.gpa,
@@ -3447,7 +3480,7 @@ pub const Object = struct {
var llvm_fields: [3]Builder.Type = undefined;
var llvm_fields_len: usize = 2;
if (layout.tag_align >= layout.payload_align) {
if (layout.tag_align.compare(.gte, layout.payload_align)) {
llvm_fields = .{ enum_tag_ty, payload_ty, .none };
} else {
llvm_fields = .{ payload_ty, enum_tag_ty, .none };
@@ -3687,7 +3720,7 @@ pub const Object = struct {
var fields: [3]Builder.Type = undefined;
var vals: [3]Builder.Constant = undefined;
if (error_align > payload_align) {
if (error_align.compare(.gt, payload_align)) {
vals[0] = llvm_error_value;
vals[1] = llvm_payload_value;
} else {
@@ -3910,7 +3943,7 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var llvm_index: usize = 0;
var offset: u64 = 0;
var big_align: u32 = 0;
var big_align: InternPool.Alignment = .none;
var need_unnamed = false;
for (
tuple.types.get(ip),
@@ -3921,9 +3954,9 @@ pub const Object = struct {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, field_align);
offset = field_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
@@ -3946,7 +3979,7 @@ pub const Object = struct {
}
{
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, big_align);
offset = big_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
@@ -3963,22 +3996,21 @@ pub const Object = struct {
struct_ty, vals);
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveLayout());
assert(struct_type.haveLayout(ip));
const struct_ty = try o.lowerType(ty);
if (struct_obj.layout == .Packed) {
if (struct_type.layout == .Packed) {
comptime assert(Type.packed_struct_layout_version == 2);
var running_int = try o.builder.intConst(struct_ty, 0);
var running_bits: u16 = 0;
for (struct_obj.fields.values(), 0..) |field, field_index| {
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
for (struct_type.field_types.get(ip), 0..) |field_ty, field_index| {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val =
try o.lowerValue((try val.fieldValue(mod, field_index)).toIntern());
const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod));
const ty_bit_size: u16 = @intCast(field_ty.toType().bitSize(mod));
const small_int_ty = try o.builder.intType(ty_bit_size);
const small_int_val = try o.builder.castConst(
if (field.ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
if (field_ty.toType().isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
non_int_val,
small_int_ty,
);
@@ -4010,15 +4042,19 @@ pub const Object = struct {
comptime assert(struct_layout_version == 2);
var llvm_index: usize = 0;
var offset: u64 = 0;
var big_align: u32 = 0;
var big_align: InternPool.Alignment = .@"1";
var need_unnamed = false;
var field_it = struct_obj.runtimeFieldIterator(mod);
while (field_it.next()) |field_and_index| {
const field = field_and_index.field;
const field_align = field.alignment(mod, struct_obj.layout);
big_align = @max(big_align, field_align);
var field_it = struct_type.iterateRuntimeOrder(ip);
while (field_it.next()) |field_index| {
const field_ty = struct_type.field_types.get(ip)[field_index].toType();
const field_align = mod.structFieldAlignment(
struct_type.fieldAlign(ip, field_index),
field_ty,
struct_type.layout,
);
big_align = big_align.max(field_align);
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, field_align);
offset = field_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
@@ -4031,19 +4067,24 @@ pub const Object = struct {
llvm_index += 1;
}
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// This is a zero-bit field - we only needed it for the alignment.
continue;
}
vals[llvm_index] = try o.lowerValue(
(try val.fieldValue(mod, field_and_index.index)).toIntern(),
(try val.fieldValue(mod, field_index)).toIntern(),
);
fields[llvm_index] = vals[llvm_index].typeOf(&o.builder);
if (fields[llvm_index] != struct_ty.structFields(&o.builder)[llvm_index])
need_unnamed = true;
llvm_index += 1;
offset += field.ty.abiSize(mod);
offset += field_ty.abiSize(mod);
}
{
const prev_offset = offset;
offset = std.mem.alignForward(u64, offset, big_align);
offset = big_align.forward(offset);
const padding_len = offset - prev_offset;
if (padding_len > 0) {
fields[llvm_index] = try o.builder.arrayType(padding_len, .i8);
@@ -4093,7 +4134,7 @@ pub const Object = struct {
const payload = try o.lowerValue(un.val);
const payload_ty = payload.typeOf(&o.builder);
if (payload_ty != union_ty.structFields(&o.builder)[
@intFromBool(layout.tag_align >= layout.payload_align)
@intFromBool(layout.tag_align.compare(.gte, layout.payload_align))
]) need_unnamed = true;
const field_size = field_ty.abiSize(mod);
if (field_size == layout.payload_size) break :p payload;
@@ -4115,7 +4156,7 @@ pub const Object = struct {
var fields: [3]Builder.Type = undefined;
var vals: [3]Builder.Constant = undefined;
var len: usize = 2;
if (layout.tag_align >= layout.payload_align) {
if (layout.tag_align.compare(.gte, layout.payload_align)) {
fields = .{ tag_ty, payload_ty, undefined };
vals = .{ tag, payload, undefined };
} else {
@@ -4174,14 +4215,15 @@ pub const Object = struct {
fn lowerParentPtr(o: *Object, ptr_val: Value, byte_aligned: bool) Allocator.Error!Builder.Constant {
const mod = o.module;
return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ptr_val.toIntern()).ptr.addr) {
.decl => |decl| o.lowerParentPtrDecl(decl),
.mut_decl => |mut_decl| o.lowerParentPtrDecl(mut_decl.decl),
.int => |int| try o.lowerIntAsPtr(int),
.eu_payload => |eu_ptr| {
const parent_ptr = try o.lowerParentPtr(eu_ptr.toValue(), true);
const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod);
const eu_ty = ip.typeOf(eu_ptr).toType().childType(mod);
const payload_ty = eu_ty.errorUnionPayload(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// In this case, we represent pointer to error union the same as pointer
@@ -4189,8 +4231,9 @@ pub const Object = struct {
return parent_ptr;
}
const index: u32 =
if (payload_ty.abiAlignment(mod) > Type.err_int.abiSize(mod)) 2 else 1;
const payload_align = payload_ty.abiAlignment(mod);
const err_align = Type.err_int.abiAlignment(mod);
const index: u32 = if (payload_align.compare(.gt, err_align)) 2 else 1;
return o.builder.gepConst(.inbounds, try o.lowerType(eu_ty), parent_ptr, null, &.{
try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, index),
});
@@ -4198,7 +4241,7 @@ pub const Object = struct {
.opt_payload => |opt_ptr| {
const parent_ptr = try o.lowerParentPtr(opt_ptr.toValue(), true);
const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod);
const opt_ty = ip.typeOf(opt_ptr).toType().childType(mod);
const payload_ty = opt_ty.optionalChild(mod);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
payload_ty.optionalReprIsPayload(mod))
@@ -4215,7 +4258,7 @@ pub const Object = struct {
.comptime_field => unreachable,
.elem => |elem_ptr| {
const parent_ptr = try o.lowerParentPtr(elem_ptr.base.toValue(), true);
const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
const elem_ty = ip.typeOf(elem_ptr.base).toType().elemType2(mod);
return o.builder.gepConst(.inbounds, try o.lowerType(elem_ty), parent_ptr, null, &.{
try o.builder.intConst(try o.lowerType(Type.usize), elem_ptr.index),
@@ -4223,7 +4266,7 @@ pub const Object = struct {
},
.field => |field_ptr| {
const parent_ptr = try o.lowerParentPtr(field_ptr.base.toValue(), byte_aligned);
const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod);
const parent_ty = ip.typeOf(field_ptr.base).toType().childType(mod);
const field_index: u32 = @intCast(field_ptr.index);
switch (parent_ty.zigTypeTag(mod)) {
@@ -4241,24 +4284,26 @@ pub const Object = struct {
const parent_llvm_ty = try o.lowerType(parent_ty);
return o.builder.gepConst(.inbounds, parent_llvm_ty, parent_ptr, null, &.{
try o.builder.intConst(.i32, 0), try o.builder.intConst(.i32, @intFromBool(
layout.tag_size > 0 and layout.tag_align >= layout.payload_align,
try o.builder.intConst(.i32, 0),
try o.builder.intConst(.i32, @intFromBool(
layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align),
)),
});
},
.Struct => {
if (parent_ty.containerLayout(mod) == .Packed) {
if (mod.typeToPackedStruct(parent_ty)) |struct_type| {
if (!byte_aligned) return parent_ptr;
const llvm_usize = try o.lowerType(Type.usize);
const base_addr =
try o.builder.castConst(.ptrtoint, parent_ptr, llvm_usize);
// count bits of fields before this one
// TODO https://github.com/ziglang/zig/issues/17178
const prev_bits = b: {
var b: usize = 0;
for (parent_ty.structFields(mod).values()[0..field_index]) |field| {
if (field.is_comptime) continue;
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
b += @intCast(field.ty.bitSize(mod));
for (0..field_index) |i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
b += @intCast(field_ty.bitSize(mod));
}
break :b b;
};
@@ -4407,11 +4452,11 @@ pub const Object = struct {
if (ptr_info.flags.is_const) {
try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
}
const elem_align = Builder.Alignment.fromByteUnits(
ptr_info.flags.alignment.toByteUnitsOptional() orelse
@max(ptr_info.child.toType().abiAlignment(mod), 1),
);
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
const elem_align = if (ptr_info.flags.alignment != .none)
ptr_info.flags.alignment
else
ptr_info.child.toType().abiAlignment(mod).max(.@"1");
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align.toLlvm() }, &o.builder);
} else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
.signed => try attributes.addParamAttr(llvm_arg_i, .signext, &o.builder),
.unsigned => try attributes.addParamAttr(llvm_arg_i, .zeroext, &o.builder),
@@ -4469,7 +4514,7 @@ pub const DeclGen = struct {
} else {
const variable_index = try o.resolveGlobalDecl(decl_index);
variable_index.setAlignment(
Builder.Alignment.fromByteUnits(decl.getAlignment(mod)),
decl.getAlignment(mod).toLlvm(),
&o.builder,
);
if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section|
@@ -4611,9 +4656,7 @@ pub const FuncGen = struct {
variable_index.setLinkage(.private, &o.builder);
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
variable_index.setAlignment(Builder.Alignment.fromByteUnits(
tv.ty.abiAlignment(mod),
), &o.builder);
variable_index.setAlignment(tv.ty.abiAlignment(mod).toLlvm(), &o.builder);
return o.builder.convConst(
.unneeded,
variable_index.toConst(&o.builder),
@@ -4929,7 +4972,7 @@ pub const FuncGen = struct {
const llvm_ret_ty = try o.lowerType(return_type);
try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder);
const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
const alignment = return_type.abiAlignment(mod).toLlvm();
const ret_ptr = try self.buildAlloca(llvm_ret_ty, alignment);
try llvm_args.append(ret_ptr);
break :blk ret_ptr;
@@ -4951,7 +4994,7 @@ pub const FuncGen = struct {
const llvm_arg = try self.resolveInst(arg);
const llvm_param_ty = try o.lowerType(param_ty);
if (isByRef(param_ty, mod)) {
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const loaded = try self.wip.load(.normal, llvm_param_ty, llvm_arg, alignment, "");
try llvm_args.append(loaded);
} else {
@@ -4965,7 +5008,7 @@ pub const FuncGen = struct {
if (isByRef(param_ty, mod)) {
try llvm_args.append(llvm_arg);
} else {
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const param_llvm_ty = llvm_arg.typeOfWip(&self.wip);
const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
_ = try self.wip.store(.normal, llvm_arg, arg_ptr, alignment);
@@ -4977,7 +5020,7 @@ pub const FuncGen = struct {
const param_ty = self.typeOf(arg);
const llvm_arg = try self.resolveInst(arg);
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const param_llvm_ty = try o.lowerType(param_ty);
const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
if (isByRef(param_ty, mod)) {
@@ -4995,13 +5038,13 @@ pub const FuncGen = struct {
const int_llvm_ty = try o.builder.intType(@intCast(param_ty.abiSize(mod) * 8));
if (isByRef(param_ty, mod)) {
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const loaded = try self.wip.load(.normal, int_llvm_ty, llvm_arg, alignment, "");
try llvm_args.append(loaded);
} else {
// LLVM does not allow bitcasting structs so we must allocate
// a local, store as one type, and then load as another type.
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const int_ptr = try self.buildAlloca(int_llvm_ty, alignment);
_ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
@@ -5022,7 +5065,7 @@ pub const FuncGen = struct {
const llvm_arg = try self.resolveInst(arg);
const is_by_ref = isByRef(param_ty, mod);
const arg_ptr = if (is_by_ref) llvm_arg else ptr: {
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
break :ptr ptr;
@@ -5048,7 +5091,7 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
const alignment = arg_ty.abiAlignment(mod).toLlvm();
if (!isByRef(arg_ty, mod)) {
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
@@ -5066,7 +5109,7 @@ pub const FuncGen = struct {
const arg = args[it.zig_index - 1];
const arg_ty = self.typeOf(arg);
var llvm_arg = try self.resolveInst(arg);
const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
const alignment = arg_ty.abiAlignment(mod).toLlvm();
if (!isByRef(arg_ty, mod)) {
const ptr = try self.buildAlloca(llvm_arg.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, llvm_arg, ptr, alignment);
@@ -5097,7 +5140,7 @@ pub const FuncGen = struct {
const param_index = it.zig_index - 1;
const param_ty = fn_info.param_types.get(ip)[param_index].toType();
const param_llvm_ty = try o.lowerType(param_ty);
const alignment = Builder.Alignment.fromByteUnits(param_ty.abiAlignment(mod));
const alignment = param_ty.abiAlignment(mod).toLlvm();
try o.addByRefParamAttrs(&attributes, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
},
.byref_mut => try attributes.addParamAttr(it.llvm_index - 1, .noundef, &o.builder),
@@ -5128,10 +5171,10 @@ pub const FuncGen = struct {
if (ptr_info.flags.is_const) {
try attributes.addParamAttr(llvm_arg_i, .readonly, &o.builder);
}
const elem_align = Builder.Alignment.fromByteUnits(
ptr_info.flags.alignment.toByteUnitsOptional() orelse
@max(ptr_info.child.toType().abiAlignment(mod), 1),
);
const elem_align = (if (ptr_info.flags.alignment != .none)
@as(InternPool.Alignment, ptr_info.flags.alignment)
else
ptr_info.child.toType().abiAlignment(mod).max(.@"1")).toLlvm();
try attributes.addParamAttr(llvm_arg_i, .{ .@"align" = elem_align }, &o.builder);
},
};
@@ -5166,7 +5209,7 @@ pub const FuncGen = struct {
return rp;
} else {
// our by-ref status disagrees with sret so we must load.
const return_alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
const return_alignment = return_type.abiAlignment(mod).toLlvm();
return self.wip.load(.normal, llvm_ret_ty, rp, return_alignment, "");
}
}
@@ -5177,7 +5220,7 @@ pub const FuncGen = struct {
// In this case the function return type is honoring the calling convention by having
// a different LLVM type than the usual one. We solve this here at the callsite
// by using our canonical type, then loading it if necessary.
const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
const alignment = return_type.abiAlignment(mod).toLlvm();
if (o.builder.useLibLlvm())
assert(o.target_data.abiSizeOfType(abi_ret_ty.toLlvm(&o.builder)) >=
o.target_data.abiSizeOfType(llvm_ret_ty.toLlvm(&o.builder)));
@@ -5192,7 +5235,7 @@ pub const FuncGen = struct {
if (isByRef(return_type, mod)) {
// our by-ref status disagrees with sret so we must allocate, store,
// and return the allocation pointer.
const alignment = Builder.Alignment.fromByteUnits(return_type.abiAlignment(mod));
const alignment = return_type.abiAlignment(mod).toLlvm();
const rp = try self.buildAlloca(llvm_ret_ty, alignment);
_ = try self.wip.store(.normal, call, rp, alignment);
return rp;
@@ -5266,7 +5309,7 @@ pub const FuncGen = struct {
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
const operand = try self.resolveInst(un_op);
const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod));
const alignment = ret_ty.abiAlignment(mod).toLlvm();
if (isByRef(ret_ty, mod)) {
// operand is a pointer however self.ret_ptr is null so that means
@@ -5311,7 +5354,7 @@ pub const FuncGen = struct {
}
const ptr = try self.resolveInst(un_op);
const abi_ret_ty = try lowerFnRetTy(o, fn_info);
const alignment = Builder.Alignment.fromByteUnits(ret_ty.abiAlignment(mod));
const alignment = ret_ty.abiAlignment(mod).toLlvm();
_ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, ""));
return .none;
}
@@ -5334,7 +5377,7 @@ pub const FuncGen = struct {
const llvm_va_list_ty = try o.lowerType(va_list_ty);
const mod = o.module;
const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod));
const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
_ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, "");
@@ -5358,7 +5401,7 @@ pub const FuncGen = struct {
const va_list_ty = self.typeOfIndex(inst);
const llvm_va_list_ty = try o.lowerType(va_list_ty);
const result_alignment = Builder.Alignment.fromByteUnits(va_list_ty.abiAlignment(mod));
const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
_ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, "");
@@ -5690,7 +5733,7 @@ pub const FuncGen = struct {
return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
} else if (isByRef(err_union_ty, mod)) {
const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
@@ -5997,7 +6040,7 @@ pub const FuncGen = struct {
if (self.canElideLoad(body_tail))
return ptr;
const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
}
@@ -6037,7 +6080,7 @@ pub const FuncGen = struct {
const elem_ptr =
try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
if (canElideLoad(self, body_tail)) return elem_ptr;
const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
} else {
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -6097,7 +6140,7 @@ pub const FuncGen = struct {
&.{rhs}, "");
if (isByRef(elem_ty, mod)) {
if (self.canElideLoad(body_tail)) return ptr;
const elem_alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
return self.loadByRef(ptr, elem_ty, elem_alignment, .normal);
}
@@ -6111,7 +6154,7 @@ pub const FuncGen = struct {
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType(mod);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return (try o.lowerPtrToVoid(ptr_ty)).toValue();
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.resolveInst(bin_op.lhs);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@@ -6163,8 +6206,8 @@ pub const FuncGen = struct {
switch (struct_ty.zigTypeTag(mod)) {
.Struct => switch (struct_ty.containerLayout(mod)) {
.Packed => {
const struct_obj = mod.typeToStruct(struct_ty).?;
const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
const struct_type = mod.typeToStruct(struct_ty).?;
const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index);
const containing_int = struct_llvm_val;
const shift_amt =
try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
@@ -6220,16 +6263,14 @@ pub const FuncGen = struct {
const alignment = struct_ty.structFieldAlign(field_index, mod);
const field_ptr_ty = try mod.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromNonzeroByteUnits(alignment),
},
.flags = .{ .alignment = alignment },
});
if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail))
return field_ptr;
assert(alignment != 0);
const field_alignment = Builder.Alignment.fromByteUnits(alignment);
assert(alignment != .none);
const field_alignment = alignment.toLlvm();
return self.loadByRef(field_ptr, field_ty, field_alignment, .normal);
} else {
return self.load(field_ptr, field_ptr_ty);
@@ -6238,11 +6279,11 @@ pub const FuncGen = struct {
.Union => {
const union_llvm_ty = try o.lowerType(struct_ty);
const layout = struct_ty.unionGetLayout(mod);
const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const field_ptr =
try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
const llvm_field_ty = try o.lowerType(field_ty);
const payload_alignment = Builder.Alignment.fromByteUnits(layout.payload_align);
const payload_alignment = layout.payload_align.toLlvm();
if (isByRef(field_ty, mod)) {
if (canElideLoad(self, body_tail)) return field_ptr;
return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
@@ -6457,7 +6498,7 @@ pub const FuncGen = struct {
if (isByRef(operand_ty, mod)) {
_ = dib.insertDeclareAtEnd(operand.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else if (o.module.comp.bin_file.options.optimize_mode == .Debug) {
const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod));
const alignment = operand_ty.abiAlignment(mod).toLlvm();
const alloca = try self.buildAlloca(operand.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, operand, alloca, alignment);
_ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
@@ -6612,7 +6653,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
const alignment = arg_ty.abiAlignment(mod).toLlvm();
const arg_llvm_ty = try o.lowerType(arg_ty);
const load_inst =
try self.wip.load(.normal, arg_llvm_ty, arg_llvm_value, alignment, "");
@@ -6624,7 +6665,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = arg_llvm_value;
llvm_param_types[llvm_param_i] = arg_llvm_value.typeOfWip(&self.wip);
} else {
const alignment = Builder.Alignment.fromByteUnits(arg_ty.abiAlignment(mod));
const alignment = arg_ty.abiAlignment(mod).toLlvm();
const arg_ptr = try self.buildAlloca(arg_llvm_value.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_llvm_value, arg_ptr, alignment);
llvm_param_values[llvm_param_i] = arg_ptr;
@@ -6676,7 +6717,7 @@ pub const FuncGen = struct {
llvm_param_values[llvm_param_i] = llvm_rw_val;
llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip);
} else {
const alignment = Builder.Alignment.fromByteUnits(rw_ty.abiAlignment(mod));
const alignment = rw_ty.abiAlignment(mod).toLlvm();
const loaded = try self.wip.load(.normal, llvm_elem_ty, llvm_rw_val, alignment, "");
llvm_param_values[llvm_param_i] = loaded;
llvm_param_types[llvm_param_i] = llvm_elem_ty;
@@ -6837,7 +6878,7 @@ pub const FuncGen = struct {
const output_ptr = try self.resolveInst(output);
const output_ptr_ty = self.typeOf(output);
const alignment = Builder.Alignment.fromByteUnits(output_ptr_ty.ptrAlignment(mod));
const alignment = output_ptr_ty.ptrAlignment(mod).toLlvm();
_ = try self.wip.store(.normal, output_value, output_ptr, alignment);
} else {
ret_val = output_value;
@@ -7030,7 +7071,7 @@ pub const FuncGen = struct {
if (operand_is_ptr) {
return self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
} else if (isByRef(err_union_ty, mod)) {
const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
if (isByRef(payload_ty, mod)) {
if (self.canElideLoad(body_tail)) return payload_ptr;
@@ -7093,7 +7134,7 @@ pub const FuncGen = struct {
}
const err_union_llvm_ty = try o.lowerType(err_union_ty);
{
const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
const error_alignment = Type.err_int.abiAlignment(mod).toLlvm();
const error_offset = errUnionErrorOffset(payload_ty, mod);
// First set the non-error value.
const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, "");
@@ -7133,9 +7174,7 @@ pub const FuncGen = struct {
const field_ty = struct_ty.structFieldType(field_index, mod);
const field_ptr_ty = try mod.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromNonzeroByteUnits(field_alignment),
},
.flags = .{ .alignment = field_alignment },
});
return self.load(field_ptr, field_ptr_ty);
}
@@ -7153,7 +7192,7 @@ pub const FuncGen = struct {
if (optional_ty.optionalReprIsPayload(mod)) return operand;
const llvm_optional_ty = try o.lowerType(optional_ty);
if (isByRef(optional_ty, mod)) {
const alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod));
const alignment = optional_ty.abiAlignment(mod).toLlvm();
const optional_ptr = try self.buildAlloca(llvm_optional_ty, alignment);
const payload_ptr = try self.wip.gepStruct(llvm_optional_ty, optional_ptr, 0, "");
const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
@@ -7181,10 +7220,10 @@ pub const FuncGen = struct {
const payload_offset = errUnionPayloadOffset(payload_ty, mod);
const error_offset = errUnionErrorOffset(payload_ty, mod);
if (isByRef(err_un_ty, mod)) {
const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod));
const alignment = err_un_ty.abiAlignment(mod).toLlvm();
const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
const error_alignment = Type.err_int.abiAlignment(mod).toLlvm();
_ = try self.wip.store(.normal, ok_err_code, err_ptr, error_alignment);
const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
@@ -7210,10 +7249,10 @@ pub const FuncGen = struct {
const payload_offset = errUnionPayloadOffset(payload_ty, mod);
const error_offset = errUnionErrorOffset(payload_ty, mod);
if (isByRef(err_un_ty, mod)) {
const alignment = Builder.Alignment.fromByteUnits(err_un_ty.abiAlignment(mod));
const alignment = err_un_ty.abiAlignment(mod).toLlvm();
const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
const err_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, error_offset, "");
const error_alignment = Builder.Alignment.fromByteUnits(Type.err_int.abiAlignment(mod));
const error_alignment = Type.err_int.abiAlignment(mod).toLlvm();
_ = try self.wip.store(.normal, operand, err_ptr, error_alignment);
const payload_ptr = try self.wip.gepStruct(err_un_llvm_ty, result_ptr, payload_offset, "");
const payload_ptr_ty = try mod.singleMutPtrType(payload_ty);
@@ -7260,7 +7299,7 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (vector_ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(vector_ptr_ty.childType(mod));
const alignment = Builder.Alignment.fromByteUnits(vector_ptr_ty.ptrAlignment(mod));
const alignment = vector_ptr_ty.ptrAlignment(mod).toLlvm();
const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, "");
const new_vector = try self.wip.insertElement(loaded, operand, index, "");
@@ -7690,7 +7729,7 @@ pub const FuncGen = struct {
const overflow_index = o.llvmFieldIndex(inst_ty, 1).?;
if (isByRef(inst_ty, mod)) {
const result_alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
const result_alignment = inst_ty.abiAlignment(mod).toLlvm();
const alloca_inst = try self.buildAlloca(llvm_inst_ty, result_alignment);
{
const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, "");
@@ -8048,7 +8087,7 @@ pub const FuncGen = struct {
const overflow_index = o.llvmFieldIndex(dest_ty, 1).?;
if (isByRef(dest_ty, mod)) {
const result_alignment = Builder.Alignment.fromByteUnits(dest_ty.abiAlignment(mod));
const result_alignment = dest_ty.abiAlignment(mod).toLlvm();
const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment);
{
const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
@@ -8321,7 +8360,7 @@ pub const FuncGen = struct {
const array_ptr = try self.buildAlloca(llvm_dest_ty, .default);
const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
if (bitcast_ok) {
const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
const alignment = inst_ty.abiAlignment(mod).toLlvm();
_ = try self.wip.store(.normal, operand, array_ptr, alignment);
} else {
// If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8349,7 +8388,7 @@ pub const FuncGen = struct {
if (bitcast_ok) {
// The array is aligned to the element's alignment, while the vector might have a completely
// different alignment. This means we need to enforce the alignment of this load.
const alignment = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
const alignment = elem_ty.abiAlignment(mod).toLlvm();
return self.wip.load(.normal, llvm_vector_ty, operand, alignment, "");
} else {
// If the ABI size of the element type is not evenly divisible by size in bits;
@@ -8374,14 +8413,12 @@ pub const FuncGen = struct {
}
if (operand_is_ref) {
const alignment = Builder.Alignment.fromByteUnits(operand_ty.abiAlignment(mod));
const alignment = operand_ty.abiAlignment(mod).toLlvm();
return self.wip.load(.normal, llvm_dest_ty, operand, alignment, "");
}
if (result_is_ref) {
const alignment = Builder.Alignment.fromByteUnits(
@max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)),
);
const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
_ = try self.wip.store(.normal, operand, result_ptr, alignment);
return result_ptr;
@@ -8393,9 +8430,7 @@ pub const FuncGen = struct {
// Both our operand and our result are values, not pointers,
// but LLVM won't let us bitcast struct values or vectors with padding bits.
// Therefore, we store operand to alloca, then load for result.
const alignment = Builder.Alignment.fromByteUnits(
@max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)),
);
const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
_ = try self.wip.store(.normal, operand, result_ptr, alignment);
return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
@@ -8441,7 +8476,7 @@ pub const FuncGen = struct {
if (isByRef(inst_ty, mod)) {
_ = dib.insertDeclareAtEnd(arg_val.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
} else if (o.module.comp.bin_file.options.optimize_mode == .Debug) {
const alignment = Builder.Alignment.fromByteUnits(inst_ty.abiAlignment(mod));
const alignment = inst_ty.abiAlignment(mod).toLlvm();
const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment);
_ = try self.wip.store(.normal, arg_val, alloca, alignment);
_ = dib.insertDeclareAtEnd(alloca.toLlvm(&self.wip), di_local_var, debug_loc, insert_block);
@@ -8462,7 +8497,7 @@ pub const FuncGen = struct {
return (try o.lowerPtrToVoid(ptr_ty)).toValue();
const pointee_llvm_ty = try o.lowerType(pointee_type);
const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
return self.buildAlloca(pointee_llvm_ty, alignment);
}
@@ -8475,7 +8510,7 @@ pub const FuncGen = struct {
return (try o.lowerPtrToVoid(ptr_ty)).toValue();
if (self.ret_ptr != .none) return self.ret_ptr;
const ret_llvm_ty = try o.lowerType(ret_ty);
const alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
return self.buildAlloca(ret_llvm_ty, alignment);
}
@@ -8515,7 +8550,7 @@ pub const FuncGen = struct {
const len = try o.builder.intValue(try o.lowerType(Type.usize), operand_ty.abiSize(mod));
_ = try self.wip.callMemSet(
dest_ptr,
Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)),
ptr_ty.ptrAlignment(mod).toLlvm(),
if (safety) try o.builder.intValue(.i8, 0xaa) else try o.builder.undefValue(.i8),
len,
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal,
@@ -8646,7 +8681,7 @@ pub const FuncGen = struct {
self.sync_scope,
toLlvmAtomicOrdering(extra.successOrder()),
toLlvmAtomicOrdering(extra.failureOrder()),
Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod)),
ptr_ty.ptrAlignment(mod).toLlvm(),
"",
);
@@ -8685,7 +8720,7 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm();
if (llvm_abi_ty != .none) {
// operand needs widening and truncating or bitcasting.
@@ -8741,9 +8776,10 @@ pub const FuncGen = struct {
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
const ordering = toLlvmAtomicOrdering(atomic_load.order);
const llvm_abi_ty = try o.getAtomicAbiType(elem_ty, false);
const ptr_alignment = Builder.Alignment.fromByteUnits(
info.flags.alignment.toByteUnitsOptional() orelse info.child.toType().abiAlignment(mod),
);
const ptr_alignment = (if (info.flags.alignment != .none)
@as(InternPool.Alignment, info.flags.alignment)
else
info.child.toType().abiAlignment(mod)).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(elem_ty);
@@ -8807,7 +8843,7 @@ pub const FuncGen = struct {
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = self.typeOf(bin_op.rhs);
const dest_ptr_align = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
const dest_ptr_align = ptr_ty.ptrAlignment(mod).toLlvm();
const dest_ptr = try self.sliceOrArrayPtr(dest_slice, ptr_ty);
const access_kind: Builder.MemoryAccessKind =
if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal;
@@ -8911,15 +8947,13 @@ pub const FuncGen = struct {
self.wip.cursor = .{ .block = body_block };
const elem_abi_align = elem_ty.abiAlignment(mod);
const it_ptr_align = Builder.Alignment.fromByteUnits(
@min(elem_abi_align, dest_ptr_align.toByteUnits() orelse std.math.maxInt(u64)),
);
const it_ptr_align = InternPool.Alignment.fromLlvm(dest_ptr_align).min(elem_abi_align).toLlvm();
if (isByRef(elem_ty, mod)) {
_ = try self.wip.callMemCpy(
it_ptr.toValue(),
it_ptr_align,
value,
Builder.Alignment.fromByteUnits(elem_abi_align),
elem_abi_align.toLlvm(),
try o.builder.intValue(llvm_usize_ty, elem_abi_size),
access_kind,
);
@@ -8985,9 +9019,9 @@ pub const FuncGen = struct {
self.wip.cursor = .{ .block = memcpy_block };
_ = try self.wip.callMemCpy(
dest_ptr,
Builder.Alignment.fromByteUnits(dest_ptr_ty.ptrAlignment(mod)),
dest_ptr_ty.ptrAlignment(mod).toLlvm(),
src_ptr,
Builder.Alignment.fromByteUnits(src_ptr_ty.ptrAlignment(mod)),
src_ptr_ty.ptrAlignment(mod).toLlvm(),
len,
access_kind,
);
@@ -8998,9 +9032,9 @@ pub const FuncGen = struct {
_ = try self.wip.callMemCpy(
dest_ptr,
Builder.Alignment.fromByteUnits(dest_ptr_ty.ptrAlignment(mod)),
dest_ptr_ty.ptrAlignment(mod).toLlvm(),
src_ptr,
Builder.Alignment.fromByteUnits(src_ptr_ty.ptrAlignment(mod)),
src_ptr_ty.ptrAlignment(mod).toLlvm(),
len,
access_kind,
);
@@ -9021,7 +9055,7 @@ pub const FuncGen = struct {
_ = try self.wip.store(.normal, new_tag, union_ptr, .default);
return .none;
}
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
const tag_field_ptr = try self.wip.gepStruct(try o.lowerType(un_ty), union_ptr, tag_index, "");
// TODO alignment on this store
_ = try self.wip.store(.normal, new_tag, tag_field_ptr, .default);
@@ -9040,13 +9074,13 @@ pub const FuncGen = struct {
const llvm_un_ty = try o.lowerType(un_ty);
if (layout.payload_size == 0)
return self.wip.load(.normal, llvm_un_ty, union_handle, .default, "");
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
const tag_field_ptr = try self.wip.gepStruct(llvm_un_ty, union_handle, tag_index, "");
const llvm_tag_ty = llvm_un_ty.structFields(&o.builder)[tag_index];
return self.wip.load(.normal, llvm_tag_ty, tag_field_ptr, .default, "");
} else {
if (layout.payload_size == 0) return union_handle;
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
return self.wip.extractValue(union_handle, &.{tag_index}, "");
}
}
@@ -9605,6 +9639,7 @@ pub const FuncGen = struct {
fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.dg.object;
const mod = o.module;
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.typeOfIndex(inst);
const len: usize = @intCast(result_ty.arrayLen(mod));
@@ -9622,23 +9657,21 @@ pub const FuncGen = struct {
return vector;
},
.Struct => {
if (result_ty.containerLayout(mod) == .Packed) {
const struct_obj = mod.typeToStruct(result_ty).?;
assert(struct_obj.haveLayout());
const big_bits = struct_obj.backing_int_ty.bitSize(mod);
if (mod.typeToPackedStruct(result_ty)) |struct_type| {
const backing_int_ty = struct_type.backingIntType(ip).*;
assert(backing_int_ty != .none);
const big_bits = backing_int_ty.toType().bitSize(mod);
const int_ty = try o.builder.intType(@intCast(big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
var running_int = try o.builder.intValue(int_ty, 0);
var running_bits: u16 = 0;
for (elements, 0..) |elem, i| {
const field = fields[i];
if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
for (elements, struct_type.field_types.get(ip)) |elem, field_ty| {
if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
const non_int_val = try self.resolveInst(elem);
const ty_bit_size: u16 = @intCast(field.ty.bitSize(mod));
const ty_bit_size: u16 = @intCast(field_ty.toType().bitSize(mod));
const small_int_ty = try o.builder.intType(ty_bit_size);
const small_int_val = if (field.ty.isPtrAtRuntime(mod))
const small_int_val = if (field_ty.toType().isPtrAtRuntime(mod))
try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
@@ -9652,10 +9685,12 @@ pub const FuncGen = struct {
return running_int;
}
assert(result_ty.containerLayout(mod) != .Packed);
if (isByRef(result_ty, mod)) {
// TODO in debug builds init to undef so that the padding will be 0xaa
// even if we fully populate the fields.
const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod));
const alignment = result_ty.abiAlignment(mod).toLlvm();
const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
for (elements, 0..) |elem, i| {
@@ -9668,9 +9703,7 @@ pub const FuncGen = struct {
const field_ptr_ty = try mod.ptrType(.{
.child = self.typeOf(elem).toIntern(),
.flags = .{
.alignment = InternPool.Alignment.fromNonzeroByteUnits(
result_ty.structFieldAlign(i, mod),
),
.alignment = result_ty.structFieldAlign(i, mod),
},
});
try self.store(field_ptr, field_ptr_ty, llvm_elem, .none);
@@ -9694,7 +9727,7 @@ pub const FuncGen = struct {
const llvm_usize = try o.lowerType(Type.usize);
const usize_zero = try o.builder.intValue(llvm_usize, 0);
const alignment = Builder.Alignment.fromByteUnits(result_ty.abiAlignment(mod));
const alignment = result_ty.abiAlignment(mod).toLlvm();
const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
const array_info = result_ty.arrayInfo(mod);
@@ -9770,7 +9803,7 @@ pub const FuncGen = struct {
// necessarily match the format that we need, depending on which tag is active.
// We must construct the correct unnamed struct type here, in order to then set
// the fields appropriately.
const alignment = Builder.Alignment.fromByteUnits(layout.abi_align);
const alignment = layout.abi_align.toLlvm();
const result_ptr = try self.buildAlloca(union_llvm_ty, alignment);
const llvm_payload = try self.resolveInst(extra.init);
const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
@@ -9799,7 +9832,7 @@ pub const FuncGen = struct {
const tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
var fields: [3]Builder.Type = undefined;
var fields_len: usize = 2;
if (layout.tag_align >= layout.payload_align) {
if (layout.tag_align.compare(.gte, layout.payload_align)) {
fields = .{ tag_ty, payload_ty, undefined };
} else {
fields = .{ payload_ty, tag_ty, undefined };
@@ -9815,7 +9848,7 @@ pub const FuncGen = struct {
// tag and the payload.
const field_ptr_ty = try mod.ptrType(.{
.child = field_ty.toIntern(),
.flags = .{ .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align) },
.flags = .{ .alignment = field_align },
});
if (layout.tag_size == 0) {
const indices = [3]Builder.Value{ usize_zero, i32_zero, i32_zero };
@@ -9827,7 +9860,7 @@ pub const FuncGen = struct {
}
{
const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const indices: [3]Builder.Value =
.{ usize_zero, try o.builder.intValue(.i32, payload_index), i32_zero };
const len: usize = if (field_size == layout.payload_size) 2 else 3;
@@ -9836,12 +9869,12 @@ pub const FuncGen = struct {
try self.store(field_ptr, field_ptr_ty, llvm_payload, .none);
}
{
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
const indices: [2]Builder.Value = .{ usize_zero, try o.builder.intValue(.i32, tag_index) };
const field_ptr = try self.wip.gep(.inbounds, llvm_union_ty, result_ptr, &indices, "");
const tag_ty = try o.lowerType(union_obj.enum_tag_ty.toType());
const llvm_tag = try o.builder.intValue(tag_ty, tag_int);
const tag_alignment = Builder.Alignment.fromByteUnits(union_obj.enum_tag_ty.toType().abiAlignment(mod));
const tag_alignment = union_obj.enum_tag_ty.toType().abiAlignment(mod).toLlvm();
_ = try self.wip.store(.normal, llvm_tag, field_ptr, tag_alignment);
}
@@ -9978,7 +10011,7 @@ pub const FuncGen = struct {
variable_index.setMutability(.constant, &o.builder);
variable_index.setUnnamedAddr(.unnamed_addr, &o.builder);
variable_index.setAlignment(
Builder.Alignment.fromByteUnits(Type.slice_const_u8_sentinel_0.abiAlignment(mod)),
Type.slice_const_u8_sentinel_0.abiAlignment(mod).toLlvm(),
&o.builder,
);
@@ -10023,7 +10056,7 @@ pub const FuncGen = struct {
// We have a pointer and we need to return a pointer to the first field.
const payload_ptr = try fg.wip.gepStruct(opt_llvm_ty, opt_handle, 0, "");
const payload_alignment = Builder.Alignment.fromByteUnits(payload_ty.abiAlignment(mod));
const payload_alignment = payload_ty.abiAlignment(mod).toLlvm();
if (isByRef(payload_ty, mod)) {
if (can_elide_load)
return payload_ptr;
@@ -10050,7 +10083,7 @@ pub const FuncGen = struct {
const mod = o.module;
if (isByRef(optional_ty, mod)) {
const payload_alignment = Builder.Alignment.fromByteUnits(optional_ty.abiAlignment(mod));
const payload_alignment = optional_ty.abiAlignment(mod).toLlvm();
const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment);
{
@@ -10123,7 +10156,7 @@ pub const FuncGen = struct {
.Union => {
const layout = struct_ty.unionGetLayout(mod);
if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
const payload_index = @intFromBool(layout.tag_align >= layout.payload_align);
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
const union_llvm_ty = try o.lowerType(struct_ty);
return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, "");
},
@@ -10142,9 +10175,7 @@ pub const FuncGen = struct {
const o = fg.dg.object;
const mod = o.module;
const pointee_llvm_ty = try o.lowerType(pointee_type);
const result_align = Builder.Alignment.fromByteUnits(
@max(ptr_alignment.toByteUnits() orelse 0, pointee_type.abiAlignment(mod)),
);
const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(mod)).toLlvm();
const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align);
const size_bytes = pointee_type.abiSize(mod);
_ = try fg.wip.callMemCpy(
@@ -10168,9 +10199,11 @@ pub const FuncGen = struct {
const elem_ty = info.child.toType();
if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return .none;
const ptr_alignment = Builder.Alignment.fromByteUnits(
info.flags.alignment.toByteUnitsOptional() orelse elem_ty.abiAlignment(mod),
);
const ptr_alignment = (if (info.flags.alignment != .none)
@as(InternPool.Alignment, info.flags.alignment)
else
elem_ty.abiAlignment(mod)).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
@@ -10201,7 +10234,7 @@ pub const FuncGen = struct {
const elem_llvm_ty = try o.lowerType(elem_ty);
if (isByRef(elem_ty, mod)) {
const result_align = Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod));
const result_align = elem_ty.abiAlignment(mod).toLlvm();
const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align);
const same_size_int = try o.builder.intType(@intCast(elem_bits));
@@ -10239,7 +10272,7 @@ pub const FuncGen = struct {
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
return;
}
const ptr_alignment = Builder.Alignment.fromByteUnits(ptr_ty.ptrAlignment(mod));
const ptr_alignment = ptr_ty.ptrAlignment(mod).toLlvm();
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
@@ -10305,7 +10338,7 @@ pub const FuncGen = struct {
ptr,
ptr_alignment,
elem,
Builder.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)),
elem_ty.abiAlignment(mod).toLlvm(),
try o.builder.intValue(try o.lowerType(Type.usize), elem_ty.abiSize(mod)),
access_kind,
);
@@ -10337,7 +10370,7 @@ pub const FuncGen = struct {
if (!target_util.hasValgrindSupport(target)) return default_value;
const llvm_usize = try o.lowerType(Type.usize);
const usize_alignment = Builder.Alignment.fromByteUnits(Type.usize.abiAlignment(mod));
const usize_alignment = Type.usize.abiAlignment(mod).toLlvm();
const array_llvm_ty = try o.builder.arrayType(6, llvm_usize);
const array_ptr = if (fg.valgrind_client_request_array == .none) a: {
@@ -10718,6 +10751,7 @@ fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Err
fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
const mod = o.module;
const ip = &mod.intern_pool;
const return_type = fn_info.return_type.toType();
if (isScalar(mod, return_type)) {
return o.lowerType(return_type);
@@ -10761,12 +10795,16 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
const first_non_integer = std.mem.indexOfNone(x86_64_abi.Class, &classes, &.{.integer});
if (first_non_integer == null or classes[first_non_integer.?] == .none) {
assert(first_non_integer orelse classes.len == types_index);
if (mod.intern_pool.indexToKey(return_type.toIntern()) == .struct_type) {
var struct_it = return_type.iterateStructOffsets(mod);
while (struct_it.next()) |_| {}
assert((std.math.divCeil(u64, struct_it.offset, 8) catch unreachable) == types_index);
if (struct_it.offset % 8 > 0) types_buffer[types_index - 1] =
try o.builder.intType(@intCast(struct_it.offset % 8 * 8));
switch (ip.indexToKey(return_type.toIntern())) {
.struct_type => |struct_type| {
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
if (size % 8 > 0) {
types_buffer[types_index - 1] = try o.builder.intType(@intCast(size % 8 * 8));
}
},
else => {},
}
if (types_index == 1) return types_buffer[0];
}
@@ -10982,6 +11020,7 @@ const ParamTypeIterator = struct {
fn nextSystemV(it: *ParamTypeIterator, ty: Type) Allocator.Error!?Lowering {
const mod = it.object.module;
const ip = &mod.intern_pool;
const classes = x86_64_abi.classifySystemV(ty, mod, .arg);
if (classes[0] == .memory) {
it.zig_index += 1;
@@ -11037,12 +11076,17 @@ const ParamTypeIterator = struct {
it.llvm_index += 1;
return .abi_sized_int;
}
if (mod.intern_pool.indexToKey(ty.toIntern()) == .struct_type) {
var struct_it = ty.iterateStructOffsets(mod);
while (struct_it.next()) |_| {}
assert((std.math.divCeil(u64, struct_it.offset, 8) catch unreachable) == types_index);
if (struct_it.offset % 8 > 0) types_buffer[types_index - 1] =
try it.object.builder.intType(@intCast(struct_it.offset % 8 * 8));
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
if (size % 8 > 0) {
types_buffer[types_index - 1] =
try it.object.builder.intType(@intCast(size % 8 * 8));
}
},
else => {},
}
}
it.types_len = types_index;
@@ -11137,8 +11181,6 @@ fn isByRef(ty: Type, mod: *Module) bool {
.Array, .Frame => return ty.hasRuntimeBits(mod),
.Struct => {
// Packed structs are represented to LLVM as integers.
if (ty.containerLayout(mod) == .Packed) return false;
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var count: usize = 0;
@@ -11154,14 +11196,18 @@ fn isByRef(ty: Type, mod: *Module) bool {
.struct_type => |s| s,
else => unreachable,
};
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
var count: usize = 0;
for (struct_obj.fields.values()) |field| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
// Packed structs are represented to LLVM as integers.
if (struct_type.layout == .Packed) return false;
const field_types = struct_type.field_types.get(ip);
var it = struct_type.iterateRuntimeOrder(ip);
var count: usize = 0;
while (it.next()) |field_index| {
count += 1;
if (count > max_fields_byval) return true;
if (isByRef(field.ty, mod)) return true;
const field_ty = field_types[field_index].toType();
if (isByRef(field_ty, mod)) return true;
}
return false;
},
@@ -11362,11 +11408,11 @@ fn buildAllocaInner(
}
fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
return @intFromBool(Type.err_int.abiAlignment(mod) > payload_ty.abiAlignment(mod));
return @intFromBool(Type.err_int.abiAlignment(mod).compare(.gt, payload_ty.abiAlignment(mod)));
}
fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
return @intFromBool(Type.err_int.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
return @intFromBool(Type.err_int.abiAlignment(mod).compare(.lte, payload_ty.abiAlignment(mod)));
}
/// Returns true for asm constraint (e.g. "=*m", "=r") if it accepts a memory location
+26 -24
View File
@@ -792,24 +792,28 @@ pub const DeclGen = struct {
},
.vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}),
.struct_type => {
const struct_ty = mod.typeToStruct(ty).?;
if (struct_ty.layout == .Packed) {
const struct_type = mod.typeToStruct(ty).?;
if (struct_type.layout == .Packed) {
return dg.todo("packed struct constants", .{});
}
// TODO iterate with runtime order instead so that struct field
// reordering can be enabled for this backend.
const struct_begin = self.size;
for (struct_ty.fields.values(), 0..) |field, i| {
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue;
for (struct_type.field_types.get(ip), 0..) |field_ty, i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) continue;
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field.ty.toIntern(),
.ty = field_ty,
.storage = .{ .u64 = bytes[i] },
} }),
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
};
try self.lower(field.ty, field_val.toValue());
try self.lower(field_ty.toType(), field_val.toValue());
// Add padding if required.
// TODO: Add to type generation as well?
@@ -838,7 +842,7 @@ pub const DeclGen = struct {
const active_field_ty = union_obj.field_types.get(ip)[active_field].toType();
const has_tag = layout.tag_size != 0;
const tag_first = layout.tag_align >= layout.payload_align;
const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
if (has_tag and tag_first) {
try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue());
@@ -1094,7 +1098,7 @@ pub const DeclGen = struct {
val,
.UniformConstant,
false,
alignment,
@intCast(alignment.toByteUnits(0)),
);
log.debug("indirect constant: index = {}", .{@intFromEnum(spv_decl_index)});
try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {});
@@ -1180,7 +1184,7 @@ pub const DeclGen = struct {
var member_names = std.BoundedArray(CacheString, 4){};
const has_tag = layout.tag_size != 0;
const tag_first = layout.tag_align >= layout.payload_align;
const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
const u8_ty_ref = try self.intType(.unsigned, 8); // TODO: What if Int8Type is not enabled?
if (has_tag and tag_first) {
@@ -1333,7 +1337,7 @@ pub const DeclGen = struct {
} });
},
.Struct => {
const struct_ty = switch (ip.indexToKey(ty.toIntern())) {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
const member_types = try self.gpa.alloc(CacheRef, tuple.values.len);
defer self.gpa.free(member_types);
@@ -1350,13 +1354,12 @@ pub const DeclGen = struct {
.member_types = member_types[0..member_index],
} });
},
.struct_type => |struct_ty| struct_ty,
.struct_type => |struct_type| struct_type,
else => unreachable,
};
const struct_obj = mod.structPtrUnwrap(struct_ty.index).?;
if (struct_obj.layout == .Packed) {
return try self.resolveType(struct_obj.backing_int_ty, .direct);
if (struct_type.layout == .Packed) {
return try self.resolveType(struct_type.backingIntType(ip).toType(), .direct);
}
var member_types = std.ArrayList(CacheRef).init(self.gpa);
@@ -1365,16 +1368,15 @@ pub const DeclGen = struct {
var member_names = std.ArrayList(CacheString).init(self.gpa);
defer member_names.deinit();
var it = struct_obj.runtimeFieldIterator(mod);
while (it.next()) |field_and_index| {
const field = field_and_index.field;
const index = field_and_index.index;
const field_name = ip.stringToSlice(struct_obj.fields.keys()[index]);
try member_types.append(try self.resolveType(field.ty, .indirect));
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = struct_type.field_types.get(ip)[field_index];
const field_name = ip.stringToSlice(struct_type.field_names.get(ip)[field_index]);
try member_types.append(try self.resolveType(field_ty.toType(), .indirect));
try member_names.append(try self.spv.resolveString(field_name));
}
const name = ip.stringToSlice(try struct_obj.getFullyQualifiedName(self.module));
const name = ip.stringToSlice(try mod.declPtr(struct_type.decl.unwrap().?).getFullyQualifiedName(mod));
return try self.spv.resolve(.{ .struct_type = .{
.name = try self.spv.resolveString(name),
@@ -1500,7 +1502,7 @@ pub const DeclGen = struct {
const error_align = Type.anyerror.abiAlignment(mod);
const payload_align = payload_ty.abiAlignment(mod);
const error_first = error_align > payload_align;
const error_first = error_align.compare(.gt, payload_align);
return .{
.payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod),
.error_first = error_first,
@@ -1662,7 +1664,7 @@ pub const DeclGen = struct {
init_val,
actual_storage_class,
final_storage_class == .Generic,
@as(u32, @intCast(decl.alignment.toByteUnits(0))),
@intCast(decl.alignment.toByteUnits(0)),
);
}
}
@@ -2603,7 +2605,7 @@ pub const DeclGen = struct {
if (layout.payload_size == 0) return union_handle;
const tag_ty = un_ty.unionTagTypeSafety(mod).?;
const tag_index = @intFromBool(layout.tag_align < layout.payload_align);
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
return try self.extractField(tag_ty, union_handle, tag_index);
}
+4 -4
View File
@@ -1118,7 +1118,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
},
};
const required_alignment = tv.ty.abiAlignment(mod);
const required_alignment: u32 = @intCast(tv.ty.abiAlignment(mod).toByteUnits(0));
const atom = self.getAtomPtr(atom_index);
atom.size = @as(u32, @intCast(code.len));
atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
@@ -1196,7 +1196,7 @@ fn updateLazySymbolAtom(
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
var required_alignment: u32 = undefined;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
@@ -1240,7 +1240,7 @@ fn updateLazySymbolAtom(
symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits(0)));
errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr });
@@ -1322,7 +1322,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(mod);
const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits(0));
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
+42 -28
View File
@@ -341,37 +341,51 @@ pub const DeclState = struct {
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
.struct_type => |struct_type| s: {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
.struct_type => |struct_type| {
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
if (struct_obj.layout == .Packed) {
if (struct_type.layout == .Packed) {
log.debug("TODO implement .debug_info for packed structs", .{});
break :blk;
}
for (
struct_obj.fields.keys(),
struct_obj.fields.values(),
0..,
) |field_name_ip, field, field_index| {
if (!field.ty.hasRuntimeBits(mod)) continue;
const field_name = ip.stringToSlice(field_name_ip);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
dbg_info_buffer.appendSliceAssumeCapacity(field_name);
dbg_info_buffer.appendAssumeCapacity(0);
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field.ty, @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
if (struct_type.isTuple(ip)) {
for (struct_type.field_types.get(ip), struct_type.offsets.get(ip), 0..) |field_ty, field_off, field_index| {
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{d}\x00", .{field_index});
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @as(u32, @intCast(index)));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
} else {
for (
struct_type.field_names.get(ip),
struct_type.field_types.get(ip),
struct_type.offsets.get(ip),
) |field_name_ip, field_ty, field_off| {
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
const field_name = ip.stringToSlice(field_name_ip);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
dbg_info_buffer.appendSliceAssumeCapacity(field_name);
dbg_info_buffer.appendAssumeCapacity(0);
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(index));
// DW.AT.data_member_location, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
}
},
else => unreachable,
@@ -416,8 +430,8 @@ pub const DeclState = struct {
.Union => {
const union_obj = mod.typeToUnion(ty).?;
const layout = mod.getUnionLayout(union_obj);
const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0;
const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else layout.payload_size;
// TODO this is temporary to match current state of unions in Zig - we don't yet have
// safety checks implemented meaning the implicit tag is not yet stored and generated
// for untagged unions.
@@ -496,11 +510,11 @@ pub const DeclState = struct {
.ErrorUnion => {
const error_ty = ty.errorUnionSet(mod);
const payload_ty = ty.errorUnionPayload(mod);
const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod);
const payload_align = if (payload_ty.isNoReturn(mod)) .none else payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
const abi_size = ty.abiSize(mod);
const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0;
const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod);
const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(mod) else 0;
const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(mod);
// DW.AT.structure_type
try dbg_info_buffer.append(@intFromEnum(AbbrevKind.struct_type));
+26 -26
View File
@@ -409,7 +409,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
const image_base = self.calcImageBase();
if (self.phdr_table_index == null) {
self.phdr_table_index = @as(u16, @intCast(self.phdrs.items.len));
self.phdr_table_index = @intCast(self.phdrs.items.len);
const p_align: u16 = switch (self.ptr_width) {
.p32 => @alignOf(elf.Elf32_Phdr),
.p64 => @alignOf(elf.Elf64_Phdr),
@@ -428,7 +428,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_table_load_index == null) {
self.phdr_table_load_index = @as(u16, @intCast(self.phdrs.items.len));
self.phdr_table_load_index = @intCast(self.phdrs.items.len);
// TODO Same as for GOT
try self.phdrs.append(gpa, .{
.p_type = elf.PT_LOAD,
@@ -444,7 +444,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_load_re_index == null) {
self.phdr_load_re_index = @as(u16, @intCast(self.phdrs.items.len));
self.phdr_load_re_index = @intCast(self.phdrs.items.len);
const file_size = self.base.options.program_code_size_hint;
const p_align = self.page_size;
const off = self.findFreeSpace(file_size, p_align);
@@ -465,7 +465,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_got_index == null) {
self.phdr_got_index = @as(u16, @intCast(self.phdrs.items.len));
self.phdr_got_index = @intCast(self.phdrs.items.len);
const file_size = @as(u64, ptr_size) * self.base.options.symbol_count_hint;
// We really only need ptr alignment but since we are using PROGBITS, linux requires
// page align.
@@ -490,7 +490,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_load_ro_index == null) {
self.phdr_load_ro_index = @as(u16, @intCast(self.phdrs.items.len));
self.phdr_load_ro_index = @intCast(self.phdrs.items.len);
// TODO Find a hint about how much data need to be in rodata ?
const file_size = 1024;
// Same reason as for GOT
@@ -513,7 +513,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_load_rw_index == null) {
self.phdr_load_rw_index = @as(u16, @intCast(self.phdrs.items.len));
self.phdr_load_rw_index = @intCast(self.phdrs.items.len);
// TODO Find a hint about how much data need to be in data ?
const file_size = 1024;
// Same reason as for GOT
@@ -536,7 +536,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.phdr_load_zerofill_index == null) {
self.phdr_load_zerofill_index = @as(u16, @intCast(self.phdrs.items.len));
self.phdr_load_zerofill_index = @intCast(self.phdrs.items.len);
const p_align = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
const off = self.phdrs.items[self.phdr_load_rw_index.?].p_offset;
log.debug("found PT_LOAD zerofill free space 0x{x} to 0x{x}", .{ off, off });
@@ -556,7 +556,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.shstrtab_section_index == null) {
self.shstrtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.shstrtab_section_index = @intCast(self.shdrs.items.len);
assert(self.shstrtab.buffer.items.len == 0);
try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
@@ -578,7 +578,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.strtab_section_index == null) {
self.strtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.strtab_section_index = @intCast(self.shdrs.items.len);
assert(self.strtab.buffer.items.len == 0);
try self.strtab.buffer.append(gpa, 0); // need a 0 at position 0
const off = self.findFreeSpace(self.strtab.buffer.items.len, 1);
@@ -600,7 +600,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.text_section_index == null) {
self.text_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.text_section_index = @intCast(self.shdrs.items.len);
const phdr = &self.phdrs.items[self.phdr_load_re_index.?];
try self.shdrs.append(gpa, .{
.sh_name = try self.shstrtab.insert(gpa, ".text"),
@@ -620,7 +620,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.got_section_index == null) {
self.got_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.got_section_index = @intCast(self.shdrs.items.len);
const phdr = &self.phdrs.items[self.phdr_got_index.?];
try self.shdrs.append(gpa, .{
.sh_name = try self.shstrtab.insert(gpa, ".got"),
@@ -639,7 +639,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.rodata_section_index == null) {
self.rodata_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.rodata_section_index = @intCast(self.shdrs.items.len);
const phdr = &self.phdrs.items[self.phdr_load_ro_index.?];
try self.shdrs.append(gpa, .{
.sh_name = try self.shstrtab.insert(gpa, ".rodata"),
@@ -659,7 +659,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.data_section_index == null) {
self.data_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.data_section_index = @intCast(self.shdrs.items.len);
const phdr = &self.phdrs.items[self.phdr_load_rw_index.?];
try self.shdrs.append(gpa, .{
.sh_name = try self.shstrtab.insert(gpa, ".data"),
@@ -679,7 +679,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.bss_section_index == null) {
self.bss_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.bss_section_index = @intCast(self.shdrs.items.len);
const phdr = &self.phdrs.items[self.phdr_load_zerofill_index.?];
try self.shdrs.append(gpa, .{
.sh_name = try self.shstrtab.insert(gpa, ".bss"),
@@ -699,7 +699,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.symtab_section_index == null) {
self.symtab_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.symtab_section_index = @intCast(self.shdrs.items.len);
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.base.options.symbol_count_hint * each_size;
@@ -714,7 +714,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.sh_size = file_size,
// The section header index of the associated string table.
.sh_link = self.strtab_section_index.?,
.sh_info = @as(u32, @intCast(self.symbols.items.len)),
.sh_info = @intCast(self.symbols.items.len),
.sh_addralign = min_align,
.sh_entsize = each_size,
});
@@ -723,7 +723,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
if (self.dwarf) |*dw| {
if (self.debug_str_section_index == null) {
self.debug_str_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.debug_str_section_index = @intCast(self.shdrs.items.len);
assert(dw.strtab.buffer.items.len == 0);
try dw.strtab.buffer.append(gpa, 0);
try self.shdrs.append(gpa, .{
@@ -743,7 +743,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_info_section_index == null) {
self.debug_info_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.debug_info_section_index = @intCast(self.shdrs.items.len);
const file_size_hint = 200;
const p_align = 1;
const off = self.findFreeSpace(file_size_hint, p_align);
@@ -768,7 +768,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_abbrev_section_index == null) {
self.debug_abbrev_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.debug_abbrev_section_index = @intCast(self.shdrs.items.len);
const file_size_hint = 128;
const p_align = 1;
const off = self.findFreeSpace(file_size_hint, p_align);
@@ -793,7 +793,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_aranges_section_index == null) {
self.debug_aranges_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.debug_aranges_section_index = @intCast(self.shdrs.items.len);
const file_size_hint = 160;
const p_align = 16;
const off = self.findFreeSpace(file_size_hint, p_align);
@@ -818,7 +818,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
if (self.debug_line_section_index == null) {
self.debug_line_section_index = @as(u16, @intCast(self.shdrs.items.len));
self.debug_line_section_index = @intCast(self.shdrs.items.len);
const file_size_hint = 250;
const p_align = 1;
const off = self.findFreeSpace(file_size_hint, p_align);
@@ -2666,12 +2666,12 @@ fn updateDeclCode(
const old_size = atom_ptr.size;
const old_vaddr = atom_ptr.value;
atom_ptr.alignment = math.log2_int(u64, required_alignment);
atom_ptr.alignment = required_alignment;
atom_ptr.size = code.len;
if (old_size > 0 and self.base.child_pid == null) {
const capacity = atom_ptr.capacity(self);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
const need_realloc = code.len > capacity or !required_alignment.check(sym.value);
if (need_realloc) {
try atom_ptr.grow(self);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, old_vaddr, atom_ptr.value });
@@ -2869,7 +2869,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
const mod = self.base.options.module.?;
const zig_module = self.file(self.zig_module_index.?).?.zig_module;
var required_alignment: u32 = undefined;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
@@ -2918,7 +2918,7 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
const atom_ptr = local_sym.atom(self).?;
atom_ptr.alive = true;
atom_ptr.name_offset = name_str_index;
atom_ptr.alignment = math.log2_int(u64, required_alignment);
atom_ptr.alignment = required_alignment;
atom_ptr.size = code.len;
try atom_ptr.allocate(self);
@@ -2995,7 +2995,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
const atom_ptr = local_sym.atom(self).?;
atom_ptr.alive = true;
atom_ptr.name_offset = name_str_index;
atom_ptr.alignment = math.log2_int(u64, required_alignment);
atom_ptr.alignment = required_alignment;
atom_ptr.size = code.len;
try atom_ptr.allocate(self);
+8 -9
View File
@@ -11,7 +11,7 @@ file_index: File.Index = 0,
size: u64 = 0,
/// Alignment of this atom as a power of two.
alignment: u8 = 0,
alignment: Alignment = .@"1",
/// Index of the input section.
input_section_index: Index = 0,
@@ -42,6 +42,8 @@ fde_end: u32 = 0,
prev_index: Index = 0,
next_index: Index = 0,
pub const Alignment = @import("../../InternPool.zig").Alignment;
pub fn name(self: Atom, elf_file: *Elf) []const u8 {
return elf_file.strtab.getAssumeExists(self.name_offset);
}
@@ -112,7 +114,6 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
const free_list = &meta.free_list;
const last_atom_index = &meta.last_atom_index;
const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
const alignment = try std.math.powi(u64, 2, self.alignment);
// We use these to indicate our intention to update metadata, placing the new atom,
// and possibly removing a free list node.
@@ -136,7 +137,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = big_atom.value + cap;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = std.mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
@@ -163,7 +164,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
} else if (elf_file.atom(last_atom_index.*)) |last| {
const ideal_capacity = Elf.padToIdeal(last.size);
const ideal_capacity_end_vaddr = last.value + ideal_capacity;
const new_start_vaddr = std.mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = last.atom_index;
break :blk new_start_vaddr;
@@ -192,7 +193,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
elf_file.debug_aranges_section_dirty = true;
}
}
shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnitsOptional().?);
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
@@ -224,10 +225,8 @@ pub fn shrink(self: *Atom, elf_file: *Elf) void {
}
/// Reallocate this atom if its current placement no longer satisfies its
/// alignment requirement, or if its size has outgrown the capacity of its
/// current slot. `allocate` performs the actual move/bookkeeping.
pub fn grow(self: *Atom, elf_file: *Elf) !void {
    // `alignment` is an InternPool.Alignment; `check` verifies the current
    // virtual address is aligned without converting to byte units.
    if (!self.alignment.check(self.value) or self.size > self.capacity(elf_file))
        try self.allocate(elf_file);
}
pub fn free(self: *Atom, elf_file: *Elf) void {
+4 -3
View File
@@ -181,10 +181,10 @@ fn addAtom(self: *Object, shdr: elf.Elf64_Shdr, shndx: u16, name: [:0]const u8,
const data = try self.shdrContents(shndx);
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
atom.size = chdr.ch_size;
atom.alignment = math.log2_int(u64, chdr.ch_addralign);
atom.alignment = Alignment.fromNonzeroByteUnits(chdr.ch_addralign);
} else {
atom.size = shdr.sh_size;
atom.alignment = math.log2_int(u64, shdr.sh_addralign);
atom.alignment = Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
}
}
@@ -571,7 +571,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
atom.file = self.index;
atom.size = this_sym.st_size;
const alignment = this_sym.st_value;
atom.alignment = math.log2_int(u64, alignment);
atom.alignment = Alignment.fromNonzeroByteUnits(alignment);
var sh_flags: u32 = elf.SHF_ALLOC | elf.SHF_WRITE;
if (is_tls) sh_flags |= elf.SHF_TLS;
@@ -870,3 +870,4 @@ const Fde = eh_frame.Fde;
const File = @import("file.zig").File;
const StringTable = @import("../strtab.zig").StringTable;
const Symbol = @import("Symbol.zig");
const Alignment = Atom.Alignment;
+17 -18
View File
@@ -1425,7 +1425,7 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
const CreateAtomOpts = struct {
size: u64 = 0,
alignment: u32 = 0,
alignment: Alignment = .@"1",
};
pub fn createAtom(self: *MachO, sym_index: u32, opts: CreateAtomOpts) !Atom.Index {
@@ -1473,7 +1473,7 @@ pub fn createTentativeDefAtoms(self: *MachO) !void {
const atom_index = try self.createAtom(global.sym_index, .{
.size = size,
.alignment = alignment,
.alignment = @enumFromInt(alignment),
});
const atom = self.getAtomPtr(atom_index);
atom.file = global.file;
@@ -1493,7 +1493,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
const sym_index = try self.allocateSymbol();
const atom_index = try self.createAtom(sym_index, .{
.size = @sizeOf(u64),
.alignment = 3,
.alignment = .@"8",
});
try self.atom_by_index_table.putNoClobber(self.base.allocator, sym_index, atom_index);
@@ -1510,7 +1510,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
switch (self.mode) {
.zld => self.addAtomToSection(atom_index),
.incremental => {
sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
sym.n_value = try self.allocateAtom(atom_index, atom.size, .@"8");
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
try self.writeAtom(atom_index, &buffer);
@@ -1521,7 +1521,7 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
fn createThreadLocalDescriptorAtom(self: *MachO, sym_name: []const u8, target: SymbolWithLoc) !Atom.Index {
const gpa = self.base.allocator;
const size = 3 * @sizeOf(u64);
const required_alignment: u32 = 1;
const required_alignment: Alignment = .@"1";
const sym_index = try self.allocateSymbol();
const atom_index = try self.createAtom(sym_index, .{});
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
@@ -2030,10 +2030,10 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
// capacity, insert a free list node for it.
}
fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value;
const align_ok = alignment.check(sym.n_value);
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value;
return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -2350,7 +2350,7 @@ fn updateLazySymbolAtom(
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
var required_alignment: u32 = undefined;
var required_alignment: Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
@@ -2617,7 +2617,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
sym.n_desc = 0;
const capacity = atom.capacity(self);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
const need_realloc = code_len > capacity or !required_alignment.check(sym.n_value);
if (need_realloc) {
const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
@@ -3204,7 +3204,7 @@ pub fn addAtomToSection(self: *MachO, atom_index: Atom.Index) void {
self.sections.set(sym.n_sect - 1, section);
}
fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: Alignment) !u64 {
const tracy = trace(@src());
defer tracy.end();
@@ -3247,7 +3247,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = sym.n_value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
const new_start_vaddr = alignment.backward(new_start_vaddr_unaligned);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the atom that it points to has grown to take up
@@ -3276,11 +3276,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
const new_start_vaddr = alignment.forward(ideal_capacity_end_vaddr);
atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForward(u64, segment.vmaddr, alignment);
break :blk alignment.forward(segment.vmaddr);
}
};
@@ -3295,10 +3295,8 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
self.segment_table_dirty = true;
}
const align_pow = @as(u32, @intCast(math.log2(alignment)));
if (header.@"align" < align_pow) {
header.@"align" = align_pow;
}
assert(alignment != .none);
header.@"align" = @min(header.@"align", @intFromEnum(alignment));
self.getAtomPtr(atom_index).size = new_atom_size;
if (atom.prev_index) |prev_index| {
@@ -3338,7 +3336,7 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u
pub fn writeSegmentHeaders(self: *MachO, writer: anytype) !void {
for (self.segments.items, 0..) |seg, i| {
const indexes = self.getSectionIndexes(@as(u8, @intCast(i)));
const indexes = self.getSectionIndexes(@intCast(i));
var out_seg = seg;
out_seg.cmdsize = @sizeOf(macho.segment_command_64);
out_seg.nsects = 0;
@@ -5526,6 +5524,7 @@ const Trie = @import("MachO/Trie.zig");
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../value.zig").Value;
const Alignment = Atom.Alignment;
pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
pub const Bind = @import("MachO/dyld_info/bind.zig").Bind(*const MachO, SymbolWithLoc);
+3 -1
View File
@@ -28,13 +28,15 @@ size: u64 = 0,
/// Alignment of this atom as a power of 2.
/// For instance, alignment of 0 should be read as 2^0 = 1 byte aligned.
alignment: u32 = 0,
alignment: Alignment = .@"1",
/// Points to the previous and next neighbours
/// TODO use the same trick as with symbols: reserve index 0 as null atom
next_index: ?Index = null,
prev_index: ?Index = null,
pub const Alignment = @import("../../InternPool.zig").Alignment;
pub const Index = u32;
pub const Binding = struct {
+12 -8
View File
@@ -382,7 +382,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
const out_sect_id = (try Atom.getOutputSection(macho_file, sect)) orelse continue;
if (sect.size == 0) continue;
const sect_id = @as(u8, @intCast(id));
const sect_id: u8 = @intCast(id);
const sym_index = self.getSectionAliasSymbolIndex(sect_id);
const atom_index = try self.createAtomFromSubsection(
macho_file,
@@ -391,7 +391,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
sym_index,
1,
sect.size,
sect.@"align",
Alignment.fromLog2Units(sect.@"align"),
out_sect_id,
);
macho_file.addAtomToSection(atom_index);
@@ -470,7 +470,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
sym_index,
1,
atom_size,
sect.@"align",
Alignment.fromLog2Units(sect.@"align"),
out_sect_id,
);
if (!sect.isZerofill()) {
@@ -494,10 +494,10 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
else
sect.addr + sect.size - addr;
const atom_align = if (addr > 0)
const atom_align = Alignment.fromLog2Units(if (addr > 0)
@min(@ctz(addr), sect.@"align")
else
sect.@"align";
sect.@"align");
const atom_index = try self.createAtomFromSubsection(
macho_file,
@@ -532,7 +532,7 @@ pub fn splitRegularSections(self: *Object, macho_file: *MachO, object_id: u32) !
sect_start_index,
sect_loc.len,
sect.size,
sect.@"align",
Alignment.fromLog2Units(sect.@"align"),
out_sect_id,
);
if (!sect.isZerofill()) {
@@ -551,11 +551,14 @@ fn createAtomFromSubsection(
inner_sym_index: u32,
inner_nsyms_trailing: u32,
size: u64,
alignment: u32,
alignment: Alignment,
out_sect_id: u8,
) !Atom.Index {
const gpa = macho_file.base.allocator;
const atom_index = try macho_file.createAtom(sym_index, .{ .size = size, .alignment = alignment });
const atom_index = try macho_file.createAtom(sym_index, .{
.size = size,
.alignment = alignment,
});
const atom = macho_file.getAtomPtr(atom_index);
atom.inner_sym_index = inner_sym_index;
atom.inner_nsyms_trailing = inner_nsyms_trailing;
@@ -1115,3 +1118,4 @@ const MachO = @import("../MachO.zig");
const Platform = @import("load_commands.zig").Platform;
const SymbolWithLoc = MachO.SymbolWithLoc;
const UnwindInfo = @import("UnwindInfo.zig");
const Alignment = Atom.Alignment;
+7 -4
View File
@@ -104,7 +104,7 @@ pub fn createThunks(macho_file: *MachO, sect_id: u8) !void {
while (true) {
const atom = macho_file.getAtom(group_end);
offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment));
offset = atom.alignment.forward(offset);
const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = offset;
@@ -112,7 +112,7 @@ pub fn createThunks(macho_file: *MachO, sect_id: u8) !void {
macho_file.logAtom(group_end, log);
header.@"align" = @max(header.@"align", atom.alignment);
header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
allocated.putAssumeCapacityNoClobber(group_end, {});
@@ -196,7 +196,7 @@ fn allocateThunk(
macho_file.logAtom(atom_index, log);
header.@"align" = @max(header.@"align", atom.alignment);
header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
if (end_atom_index == atom_index) break;
@@ -326,7 +326,10 @@ fn isReachable(
fn createThunkAtom(macho_file: *MachO) !Atom.Index {
const sym_index = try macho_file.allocateSymbol();
const atom_index = try macho_file.createAtom(sym_index, .{ .size = @sizeOf(u32) * 3, .alignment = 2 });
const atom_index = try macho_file.createAtom(sym_index, .{
.size = @sizeOf(u32) * 3,
.alignment = .@"4",
});
const sym = macho_file.getSymbolPtr(.{ .sym_index = sym_index });
sym.n_type = macho.N_SECT;
sym.n_sect = macho_file.text_section_index.? + 1;
+3 -6
View File
@@ -985,19 +985,16 @@ fn calcSectionSizes(macho_file: *MachO) !void {
while (true) {
const atom = macho_file.getAtom(atom_index);
const atom_alignment = try math.powi(u32, 2, atom.alignment);
const atom_offset = mem.alignForward(u64, header.size, atom_alignment);
const atom_offset = atom.alignment.forward(header.size);
const padding = atom_offset - header.size;
const sym = macho_file.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = atom_offset;
header.size += padding + atom.size;
header.@"align" = @max(header.@"align", atom.alignment);
header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
atom_index = atom.next_index orelse break;
}
}
+1 -1
View File
@@ -1106,7 +1106,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
var required_alignment: u32 = undefined;
var required_alignment: InternPool.Alignment = .none;
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
+23 -21
View File
@@ -187,8 +187,10 @@ debug_pubtypes_atom: ?Atom.Index = null,
/// rather than by the linker.
synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .{},
pub const Alignment = types.Alignment;
pub const Segment = struct {
alignment: u32,
alignment: Alignment,
size: u32,
offset: u32,
flags: u32,
@@ -1490,7 +1492,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8
try atom.code.appendSlice(wasm.base.allocator, code);
try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
atom.size = @as(u32, @intCast(code.len));
atom.size = @intCast(code.len);
if (code.len == 0) return;
atom.alignment = decl.getAlignment(mod);
}
@@ -2050,7 +2052,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
};
const segment: *Segment = &wasm.segments.items[final_index];
segment.alignment = @max(segment.alignment, atom.alignment);
segment.alignment = segment.alignment.max(atom.alignment);
try wasm.appendAtomAtIndex(final_index, atom_index);
}
@@ -2121,7 +2123,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
}
}
}
offset = std.mem.alignForward(u32, offset, atom.alignment);
offset = @intCast(atom.alignment.forward(offset));
atom.offset = offset;
log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
symbol_loc.getName(wasm),
@@ -2132,7 +2134,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
offset += atom.size;
atom_index = atom.prev orelse break;
}
segment.size = std.mem.alignForward(u32, offset, segment.alignment);
segment.size = @intCast(segment.alignment.forward(offset));
}
}
@@ -2351,7 +2353,7 @@ fn createSyntheticFunction(
.offset = 0,
.sym_index = loc.index,
.file = null,
.alignment = 1,
.alignment = .@"1",
.next = null,
.prev = null,
.code = function_body.moveToUnmanaged(),
@@ -2382,11 +2384,11 @@ pub fn createFunction(
const atom_index = @as(Atom.Index, @intCast(wasm.managed_atoms.items.len));
const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = .{
.size = @as(u32, @intCast(function_body.items.len)),
.size = @intCast(function_body.items.len),
.offset = 0,
.sym_index = loc.index,
.file = null,
.alignment = 1,
.alignment = .@"1",
.next = null,
.prev = null,
.code = function_body.moveToUnmanaged(),
@@ -2734,8 +2736,8 @@ fn setupMemory(wasm: *Wasm) !void {
const page_size = std.wasm.page_size; // 64kb
// Use the user-provided stack size or else we use 1MB by default
const stack_size = wasm.base.options.stack_size_override orelse page_size * 16;
const stack_alignment = 16; // wasm's stack alignment as specified by tool-convention
const heap_alignment = 16; // wasm's heap alignment as specified by tool-convention
const stack_alignment: Alignment = .@"16"; // wasm's stack alignment as specified by tool-convention
const heap_alignment: Alignment = .@"16"; // wasm's heap alignment as specified by tool-convention
// Always place the stack at the start by default
// unless the user specified the global-base flag
@@ -2748,7 +2750,7 @@ fn setupMemory(wasm: *Wasm) !void {
const is_obj = wasm.base.options.output_mode == .Obj;
if (place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr = stack_alignment.forward(memory_ptr);
memory_ptr += stack_size;
// We always put the stack pointer global at index 0
wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
@@ -2758,7 +2760,7 @@ fn setupMemory(wasm: *Wasm) !void {
var data_seg_it = wasm.data_segments.iterator();
while (data_seg_it.next()) |entry| {
const segment = &wasm.segments.items[entry.value_ptr.*];
memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment);
memory_ptr = segment.alignment.forward(memory_ptr);
// set TLS-related symbols
if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
@@ -2768,7 +2770,7 @@ fn setupMemory(wasm: *Wasm) !void {
}
if (wasm.findGlobalSymbol("__tls_align")) |loc| {
const sym = loc.getSymbol(wasm);
wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment);
wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnitsOptional().?);
}
if (wasm.findGlobalSymbol("__tls_base")) |loc| {
const sym = loc.getSymbol(wasm);
@@ -2795,7 +2797,7 @@ fn setupMemory(wasm: *Wasm) !void {
}
if (!place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr = stack_alignment.forward(memory_ptr);
memory_ptr += stack_size;
wasm.wasm_globals.items[0].init.i32_const = @as(i32, @bitCast(@as(u32, @intCast(memory_ptr))));
}
@@ -2804,7 +2806,7 @@ fn setupMemory(wasm: *Wasm) !void {
// We must set its virtual address so it can be used in relocations.
if (wasm.findGlobalSymbol("__heap_base")) |loc| {
const symbol = loc.getSymbol(wasm);
symbol.virtual_address = @as(u32, @intCast(mem.alignForward(u64, memory_ptr, heap_alignment)));
symbol.virtual_address = @intCast(heap_alignment.forward(memory_ptr));
}
// Setup the max amount of pages
@@ -2879,7 +2881,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32
flags |= @intFromEnum(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
}
try wasm.segments.append(wasm.base.allocator, .{
.alignment = 1,
.alignment = .@"1",
.size = 0,
.offset = 0,
.flags = flags,
@@ -2954,7 +2956,7 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32
/// Appends a new segment with default field values
fn appendDummySegment(wasm: *Wasm) !void {
try wasm.segments.append(wasm.base.allocator, .{
.alignment = 1,
.alignment = .@"1",
.size = 0,
.offset = 0,
.flags = 0,
@@ -3011,7 +3013,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
// the pointers into the list using addends which are appended to the relocation.
const names_atom_index = try wasm.createAtom();
const names_atom = wasm.getAtomPtr(names_atom_index);
names_atom.alignment = 1;
names_atom.alignment = .@"1";
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
const names_symbol = &wasm.symbols.items[names_atom.sym_index];
names_symbol.* = .{
@@ -3085,7 +3087,7 @@ pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !
.flags = @intFromEnum(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
};
atom.alignment = 1; // debug sections are always 1-byte-aligned
atom.alignment = .@"1"; // debug sections are always 1-byte-aligned
return atom_index;
}
@@ -4724,12 +4726,12 @@ fn emitSegmentInfo(wasm: *Wasm, binary_bytes: *std.ArrayList(u8)) !void {
for (wasm.segment_info.values()) |segment_info| {
log.debug("Emit segment: {s} align({d}) flags({b})", .{
segment_info.name,
@ctz(segment_info.alignment),
segment_info.alignment,
segment_info.flags,
});
try leb.writeULEB128(writer, @as(u32, @intCast(segment_info.name.len)));
try writer.writeAll(segment_info.name);
try leb.writeULEB128(writer, @ctz(segment_info.alignment));
try leb.writeULEB128(writer, segment_info.alignment.toLog2Units());
try leb.writeULEB128(writer, segment_info.flags);
}
+2 -2
View File
@@ -19,7 +19,7 @@ relocs: std.ArrayListUnmanaged(types.Relocation) = .{},
/// Contains the binary data of an atom, which can be non-relocated
code: std.ArrayListUnmanaged(u8) = .{},
/// For code this is 1, for data this is set to the highest value of all segments
alignment: u32,
alignment: Wasm.Alignment,
/// Offset into the section where the atom lives, this already accounts
/// for alignment.
offset: u32,
@@ -43,7 +43,7 @@ pub const Index = u32;
/// Represents a default empty wasm `Atom`
pub const empty: Atom = .{
.alignment = 1,
.alignment = .@"1",
.file = null,
.next = null,
.offset = 0,
+7 -9
View File
@@ -8,6 +8,7 @@ const types = @import("types.zig");
const std = @import("std");
const Wasm = @import("../Wasm.zig");
const Symbol = @import("Symbol.zig");
const Alignment = types.Alignment;
const Allocator = std.mem.Allocator;
const leb = std.leb;
@@ -88,12 +89,9 @@ const RelocatableData = struct {
/// meta data of the given object file.
/// NOTE: Alignment is encoded as a power of 2, so we shift the symbol's
/// alignment to retrieve the natural alignment.
/// Returns the alignment of this relocatable section.
/// Code and debug sections are always 1-byte aligned; data sections use the
/// alignment stored in the object's segment info (already an
/// `InternPool.Alignment`, so no power-of-two decoding is needed).
pub fn getAlignment(relocatable_data: RelocatableData, object: *const Object) Alignment {
    if (relocatable_data.type != .data) return .@"1";
    return object.segment_info[relocatable_data.index].alignment;
}
/// Returns the symbol kind that corresponds to the relocatable section
@@ -671,7 +669,7 @@ fn Parser(comptime ReaderType: type) type {
try reader.readNoEof(name);
segment.* = .{
.name = name,
.alignment = try leb.readULEB128(u32, reader),
.alignment = @enumFromInt(try leb.readULEB128(u32, reader)),
.flags = try leb.readULEB128(u32, reader),
};
log.debug("Found segment: {s} align({d}) flags({b})", .{
@@ -919,7 +917,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
const atom_index = @as(Atom.Index, @intCast(wasm_bin.managed_atoms.items.len));
const atom_index: Atom.Index = @intCast(wasm_bin.managed_atoms.items.len);
const atom = try wasm_bin.managed_atoms.addOne(gpa);
atom.* = Atom.empty;
atom.file = object_index;
@@ -984,7 +982,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index];
if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned
segment.alignment = @max(segment.alignment, atom.alignment);
segment.alignment = segment.alignment.max(atom.alignment);
}
try wasm_bin.appendAtomAtIndex(final_index, atom_index);
+3 -1
View File
@@ -109,11 +109,13 @@ pub const SubsectionType = enum(u8) {
WASM_SYMBOL_TABLE = 8,
};
pub const Alignment = @import("../../InternPool.zig").Alignment;
pub const Segment = struct {
/// Segment's name, encoded as UTF-8 bytes.
name: []const u8,
/// The required alignment of the segment, encoded as a power of 2
alignment: u32,
alignment: Alignment,
/// Bitfield containing flags for a segment
flags: u32,
+7 -6
View File
@@ -1,6 +1,7 @@
const std = @import("std");
const Type = @import("type.zig").Type;
const AddressSpace = std.builtin.AddressSpace;
const Alignment = @import("InternPool.zig").Alignment;
pub const ArchOsAbi = struct {
arch: std.Target.Cpu.Arch,
@@ -595,13 +596,13 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
}
/// Returns the default function alignment for the given target as an
/// `InternPool.Alignment`. Returns `.@"1"` if function alignment is not
/// observable or settable on this architecture.
pub fn defaultFunctionAlignment(target: std.Target) Alignment {
    return switch (target.cpu.arch) {
        .arm, .armeb => .@"4",
        .aarch64, .aarch64_32, .aarch64_be => .@"4",
        .sparc, .sparcel, .sparc64 => .@"4",
        // RISC-V compressed instructions permit 2-byte alignment.
        .riscv64 => .@"2",
        else => .@"1",
    };
}
+264 -401
View File
@@ -9,6 +9,7 @@ const target_util = @import("target.zig");
const TypedValue = @import("TypedValue.zig");
const Sema = @import("Sema.zig");
const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;
/// Both types and values are canonically represented by a single 32-bit integer
/// which is an index into an `InternPool` data structure.
@@ -196,9 +197,11 @@ pub const Type = struct {
info.packed_offset.host_size != 0 or
info.flags.vector_index != .none)
{
const alignment = info.flags.alignment.toByteUnitsOptional() orelse
const alignment = if (info.flags.alignment != .none)
info.flags.alignment
else
info.child.toType().abiAlignment(mod);
try writer.print("align({d}", .{alignment});
try writer.print("align({d}", .{alignment.toByteUnits(0)});
if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
try writer.print(":{d}:{d}", .{
@@ -315,8 +318,8 @@ pub const Type = struct {
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
const decl = mod.declPtr(struct_obj.owner_decl);
if (struct_type.decl.unwrap()) |decl_index| {
const decl = mod.declPtr(decl_index);
try decl.renderFullyQualifiedName(mod, writer);
} else if (struct_type.namespace.unwrap()) |namespace_index| {
const namespace = mod.namespacePtr(namespace_index);
@@ -561,24 +564,20 @@ pub const Type = struct {
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
// This struct has no fields.
return false;
};
if (struct_obj.status == .field_types_wip) {
if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
struct_obj.assumed_runtime_bits = true;
return true;
}
switch (strat) {
.sema => |sema| _ = try sema.resolveTypeFields(ty),
.eager => assert(struct_obj.haveFieldTypes()),
.lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy,
.eager => assert(struct_type.haveFieldTypes(ip)),
.lazy => if (!struct_type.haveFieldTypes(ip)) return error.NeedLazy,
}
for (struct_obj.fields.values()) |field| {
if (field.is_comptime) continue;
if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
for (0..struct_type.field_types.len) |i| {
if (struct_type.comptime_bits.getBit(ip, i)) continue;
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
return false;
@@ -728,11 +727,8 @@ pub const Type = struct {
=> false,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse {
// Struct with no fields has a well-defined layout of no bits.
return true;
};
return struct_obj.layout != .Auto;
// Structs with no fields have a well-defined layout of no bits.
return struct_type.layout != .Auto or struct_type.field_types.len == 0;
},
.union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
.none, .safety => union_type.flagsPtr(ip).layout != .Auto,
@@ -806,22 +802,23 @@ pub const Type = struct {
return mod.intern_pool.isNoReturn(ty.toIntern());
}
/// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
pub fn ptrAlignment(ty: Type, mod: *Module) u32 {
/// Returns `none` if the pointer is naturally aligned and the element type is 0-bit.
pub fn ptrAlignment(ty: Type, mod: *Module) Alignment {
return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
}
pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 {
pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !Alignment {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| {
if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| {
return @as(u32, @intCast(a));
} else if (opt_sema) |sema| {
if (ptr_type.flags.alignment != .none)
return ptr_type.flags.alignment;
if (opt_sema) |sema| {
const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema });
return res.scalar;
} else {
return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
}
return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
},
.opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema),
else => unreachable,
@@ -836,8 +833,8 @@ pub const Type = struct {
};
}
/// Returns 0 for 0-bit types.
pub fn abiAlignment(ty: Type, mod: *Module) u32 {
/// Never returns `none`. Asserts that all necessary type resolution is already done.
pub fn abiAlignment(ty: Type, mod: *Module) Alignment {
return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
}
@@ -846,12 +843,12 @@ pub const Type = struct {
pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
.val => |val| return val,
.scalar => |x| return mod.intValue(Type.comptime_int, x),
.scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits(0)),
}
}
pub const AbiAlignmentAdvanced = union(enum) {
scalar: u32,
scalar: Alignment,
val: Value,
};
@@ -881,36 +878,36 @@ pub const Type = struct {
};
switch (ty.toIntern()) {
.empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 },
.empty_struct_type => return AbiAlignmentAdvanced{ .scalar = .@"1" },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) };
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = .@"1" };
return .{ .scalar = intAbiAlignment(int_type.bits, target) };
},
.ptr_type, .anyframe_type => {
return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
},
.array_type => |array_type| {
return array_type.child.toType().abiAlignmentAdvanced(mod, strat);
},
.vector_type => |vector_type| {
const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema);
const bits = @as(u32, @intCast(bits_u64));
const bits: u32 = @intCast(bits_u64);
const bytes = ((bits * vector_type.len) + 7) / 8;
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
return AbiAlignmentAdvanced{ .scalar = alignment };
return .{ .scalar = Alignment.fromByteUnits(alignment) };
},
.opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
.error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, info.payload_type.toType()),
// TODO revisit this when we have the concept of the error tag type
.error_set_type, .inferred_error_set_type => return AbiAlignmentAdvanced{ .scalar = 2 },
.error_set_type, .inferred_error_set_type => return .{ .scalar = .@"2" },
// represents machine code; not a pointer
.func_type => |func_type| return AbiAlignmentAdvanced{
.scalar = if (func_type.alignment.toByteUnitsOptional()) |a|
@as(u32, @intCast(a))
.func_type => |func_type| return .{
.scalar = if (func_type.alignment != .none)
func_type.alignment
else
target_util.defaultFunctionAlignment(target),
},
@@ -926,47 +923,50 @@ pub const Type = struct {
.call_modifier,
.prefetch_options,
.anyopaque,
=> return AbiAlignmentAdvanced{ .scalar = 1 },
=> return .{ .scalar = .@"1" },
.usize,
.isize,
.export_options,
.extern_options,
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
.type_info,
=> return .{
.scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
},
.c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) },
.c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
.c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) },
.c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) },
.c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) },
.c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) },
.c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) },
.c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) },
.c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) },
.c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
.c_char => return .{ .scalar = cTypeAlign(target, .char) },
.c_short => return .{ .scalar = cTypeAlign(target, .short) },
.c_ushort => return .{ .scalar = cTypeAlign(target, .ushort) },
.c_int => return .{ .scalar = cTypeAlign(target, .int) },
.c_uint => return .{ .scalar = cTypeAlign(target, .uint) },
.c_long => return .{ .scalar = cTypeAlign(target, .long) },
.c_ulong => return .{ .scalar = cTypeAlign(target, .ulong) },
.c_longlong => return .{ .scalar = cTypeAlign(target, .longlong) },
.c_ulonglong => return .{ .scalar = cTypeAlign(target, .ulonglong) },
.c_longdouble => return .{ .scalar = cTypeAlign(target, .longdouble) },
.f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
.f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) },
.f16 => return .{ .scalar = .@"2" },
.f32 => return .{ .scalar = cTypeAlign(target, .float) },
.f64 => switch (target.c_type_bit_size(.double)) {
64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) },
else => return AbiAlignmentAdvanced{ .scalar = 8 },
64 => return .{ .scalar = cTypeAlign(target, .double) },
else => return .{ .scalar = .@"8" },
},
.f80 => switch (target.c_type_bit_size(.longdouble)) {
80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
80 => return .{ .scalar = cTypeAlign(target, .longdouble) },
else => {
const u80_ty: Type = .{ .ip_index = .u80_type };
return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) };
return .{ .scalar = abiAlignment(u80_ty, mod) };
},
},
.f128 => switch (target.c_type_bit_size(.longdouble)) {
128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => return AbiAlignmentAdvanced{ .scalar = 16 },
128 => return .{ .scalar = cTypeAlign(target, .longdouble) },
else => return .{ .scalar = .@"16" },
},
// TODO revisit this when we have the concept of the error tag type
.anyerror,
.adhoc_inferred_error_set,
=> return AbiAlignmentAdvanced{ .scalar = 2 },
=> return .{ .scalar = .@"2" },
.void,
.type,
@@ -975,90 +975,46 @@ pub const Type = struct {
.null,
.undefined,
.enum_literal,
.type_info,
=> return AbiAlignmentAdvanced{ .scalar = 0 },
=> return .{ .scalar = .@"1" },
.noreturn => unreachable,
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
return AbiAlignmentAdvanced{ .scalar = 0 };
if (opt_sema) |sema| {
if (struct_obj.status == .field_types_wip) {
// We'll guess "pointer-aligned", if the struct has an
// underaligned pointer field then some allocations
// might require explicit alignment.
return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
if (struct_type.layout == .Packed) {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{
.val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue(),
},
.eager => {},
}
_ = try sema.resolveTypeFields(ty);
return .{ .scalar = struct_type.backingIntType(ip).toType().abiAlignment(mod) };
}
if (!struct_obj.haveFieldTypes()) switch (strat) {
.eager => unreachable, // struct layout not resolved
.sema => unreachable, // handled above
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
const flags = struct_type.flagsPtr(ip).*;
if (flags.alignment != .none) return .{ .scalar = flags.alignment };
return switch (strat) {
.eager => unreachable, // struct alignment not resolved
.sema => |sema| .{
.scalar = try sema.resolveStructAlignment(ty.toIntern(), struct_type),
},
.lazy => .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
};
if (struct_obj.layout == .Packed) {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
.eager => {},
}
assert(struct_obj.haveLayout());
return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) };
}
const fields = ty.structFields(mod);
var big_align: u32 = 0;
for (fields.values()) |field| {
if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
else => |e| return e,
})) continue;
const field_align = @as(u32, @intCast(field.abi_align.toByteUnitsOptional() orelse
switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |a| a,
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
.sema => unreachable, // handled above
.lazy => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
}));
big_align = @max(big_align, field_align);
// This logic is duplicated in Module.Struct.Field.alignment.
if (struct_obj.layout == .Extern or target.ofmt == .c) {
if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
// The C ABI requires 128 bit integer fields of structs
// to be 16-bytes aligned.
big_align = @max(big_align, 16);
}
}
}
return AbiAlignmentAdvanced{ .scalar = big_align };
},
.anon_struct_type => |tuple| {
var big_align: u32 = 0;
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
if (!(field_ty.toType().hasRuntimeBits(mod))) continue;
switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) {
.scalar => |field_align| big_align = @max(big_align, field_align),
.scalar => |field_align| big_align = big_align.max(field_align),
.val => switch (strat) {
.eager => unreachable, // field type alignment not resolved
.sema => unreachable, // passed to abiAlignmentAdvanced above
@@ -1069,7 +1025,7 @@ pub const Type = struct {
},
}
}
return AbiAlignmentAdvanced{ .scalar = big_align };
return .{ .scalar = big_align };
},
.union_type => |union_type| {
@@ -1078,7 +1034,7 @@ pub const Type = struct {
// We'll guess "pointer-aligned", if the union has an
// underaligned pointer field then some allocations
// might require explicit alignment.
return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) };
return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
}
_ = try sema.resolveTypeFields(ty);
}
@@ -1095,13 +1051,11 @@ pub const Type = struct {
if (union_obj.hasTag(ip)) {
return abiAlignmentAdvanced(union_obj.enum_tag_ty.toType(), mod, strat);
} else {
return AbiAlignmentAdvanced{
.scalar = @intFromBool(union_obj.flagsPtr(ip).layout == .Extern),
};
return .{ .scalar = .@"1" };
}
}
var max_align: u32 = 0;
var max_align: Alignment = .@"1";
if (union_obj.hasTag(ip)) max_align = union_obj.enum_tag_ty.toType().abiAlignment(mod);
for (0..union_obj.field_names.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index].toType();
@@ -1117,8 +1071,9 @@ pub const Type = struct {
else => |e| return e,
})) continue;
const field_align_bytes: u32 = @intCast(field_align.toByteUnitsOptional() orelse
switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
const field_align_bytes: Alignment = if (field_align != .none)
field_align
else switch (try field_ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |a| a,
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
@@ -1128,13 +1083,15 @@ pub const Type = struct {
.storage = .{ .lazy_align = ty.toIntern() },
} })).toValue() },
},
});
max_align = @max(max_align, field_align_bytes);
};
max_align = max_align.max(field_align_bytes);
}
return AbiAlignmentAdvanced{ .scalar = max_align };
return .{ .scalar = max_align };
},
.opaque_type => return .{ .scalar = .@"1" },
.enum_type => |enum_type| return .{
.scalar = enum_type.tag_ty.toType().abiAlignment(mod),
},
.opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 },
.enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) },
// values, not types
.undef,
@@ -1179,20 +1136,15 @@ pub const Type = struct {
} })).toValue() },
else => |e| return e,
})) {
return AbiAlignmentAdvanced{ .scalar = code_align };
return .{ .scalar = code_align };
}
return AbiAlignmentAdvanced{ .scalar = @max(
code_align,
return .{ .scalar = code_align.max(
(try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar,
) };
},
.lazy => {
switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |payload_align| {
return AbiAlignmentAdvanced{
.scalar = @max(code_align, payload_align),
};
},
.scalar => |payload_align| return .{ .scalar = code_align.max(payload_align) },
.val => {},
}
return .{ .val = (try mod.intern(.{ .int = .{
@@ -1212,9 +1164,11 @@ pub const Type = struct {
const child_type = ty.optionalChild(mod);
switch (child_type.zigTypeTag(mod)) {
.Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
.Pointer => return .{
.scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
},
.ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
.NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 },
.NoReturn => return .{ .scalar = .@"1" },
else => {},
}
@@ -1227,12 +1181,12 @@ pub const Type = struct {
} })).toValue() },
else => |e| return e,
})) {
return AbiAlignmentAdvanced{ .scalar = 1 };
return .{ .scalar = .@"1" };
}
return child_type.abiAlignmentAdvanced(mod, strat);
},
.lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
.scalar => |x| return .{ .scalar = x.max(.@"1") },
.val => return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_align = ty.toIntern() },
@@ -1310,8 +1264,7 @@ pub const Type = struct {
.storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
};
const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
const elem_bits = @as(u32, @intCast(elem_bits_u64));
const elem_bits = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema);
const total_bits = elem_bits * vector_type.len;
const total_bytes = (total_bits + 7) / 8;
const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) {
@@ -1321,8 +1274,7 @@ pub const Type = struct {
.storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
};
const result = std.mem.alignForward(u32, total_bytes, alignment);
return AbiSizeAdvanced{ .scalar = result };
return AbiSizeAdvanced{ .scalar = alignment.forward(total_bytes) };
},
.opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
@@ -1360,16 +1312,16 @@ pub const Type = struct {
};
var size: u64 = 0;
if (code_align > payload_align) {
if (code_align.compare(.gt, payload_align)) {
size += code_size;
size = std.mem.alignForward(u64, size, payload_align);
size = payload_align.forward(size);
size += payload_size;
size = std.mem.alignForward(u64, size, code_align);
size = code_align.forward(size);
} else {
size += payload_size;
size = std.mem.alignForward(u64, size, code_align);
size = code_align.forward(size);
size += code_size;
size = std.mem.alignForward(u64, size, payload_align);
size = payload_align.forward(size);
}
return AbiSizeAdvanced{ .scalar = size };
},
@@ -1435,41 +1387,35 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
},
.struct_type => |struct_type| switch (ty.containerLayout(mod)) {
.Packed => {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
return AbiSizeAdvanced{ .scalar = 0 };
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() },
.eager => {},
}
assert(struct_obj.haveLayout());
return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) };
},
else => {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse
return AbiSizeAdvanced{ .scalar = 0 };
if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
} })).toValue() };
.struct_type => |struct_type| {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => switch (struct_type.layout) {
.Packed => {
if (struct_type.backingIntType(ip).* == .none) return .{
.val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
} })).toValue(),
};
},
.eager => {},
}
const field_count = ty.structFieldCount(mod);
if (field_count == 0) {
return AbiSizeAdvanced{ .scalar = 0 };
}
return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
},
.Auto, .Extern => {
if (!struct_type.haveLayout(ip)) return .{
.val = (try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
.storage = .{ .lazy_size = ty.toIntern() },
} })).toValue(),
};
},
},
.eager => {},
}
return switch (struct_type.layout) {
.Packed => .{
.scalar = struct_type.backingIntType(ip).toType().abiSize(mod),
},
.Auto, .Extern => .{ .scalar = struct_type.size(ip).* },
};
},
.anon_struct_type => |tuple| {
switch (strat) {
@@ -1565,20 +1511,19 @@ pub const Type = struct {
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
return AbiSizeAdvanced{
.scalar = child_ty.abiAlignment(mod) + payload_size,
.scalar = child_ty.abiAlignment(mod).toByteUnits(0) + payload_size,
};
}
fn intAbiSize(bits: u16, target: Target) u64 {
const alignment = intAbiAlignment(bits, target);
return std.mem.alignForward(u64, @as(u16, @intCast((@as(u17, bits) + 7) / 8)), alignment);
return intAbiAlignment(bits, target).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8)));
}
fn intAbiAlignment(bits: u16, target: Target) u32 {
return @min(
fn intAbiAlignment(bits: u16, target: Target) Alignment {
return Alignment.fromByteUnits(@min(
std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
target.maxIntAlignment(),
);
));
}
pub fn bitSize(ty: Type, mod: *Module) u64 {
@@ -1610,7 +1555,7 @@ pub const Type = struct {
const len = array_type.len + @intFromBool(array_type.sentinel != .none);
if (len == 0) return 0;
const elem_ty = array_type.child.toType();
const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod));
const elem_size = @max(elem_ty.abiAlignment(mod).toByteUnits(0), elem_ty.abiSize(mod));
if (elem_size == 0) return 0;
const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema);
return (len - 1) * 8 * elem_size + elem_bit_size;
@@ -1675,35 +1620,33 @@ pub const Type = struct {
.enum_literal => unreachable,
.generic_poison => unreachable,
.atomic_order => unreachable, // missing call to resolveTypeFields
.atomic_rmw_op => unreachable, // missing call to resolveTypeFields
.calling_convention => unreachable, // missing call to resolveTypeFields
.address_space => unreachable, // missing call to resolveTypeFields
.float_mode => unreachable, // missing call to resolveTypeFields
.reduce_op => unreachable, // missing call to resolveTypeFields
.call_modifier => unreachable, // missing call to resolveTypeFields
.prefetch_options => unreachable, // missing call to resolveTypeFields
.export_options => unreachable, // missing call to resolveTypeFields
.extern_options => unreachable, // missing call to resolveTypeFields
.type_info => unreachable, // missing call to resolveTypeFields
.atomic_order => unreachable,
.atomic_rmw_op => unreachable,
.calling_convention => unreachable,
.address_space => unreachable,
.float_mode => unreachable,
.reduce_op => unreachable,
.call_modifier => unreachable,
.prefetch_options => unreachable,
.export_options => unreachable,
.extern_options => unreachable,
.type_info => unreachable,
},
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
if (struct_obj.layout != .Packed) {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
if (struct_type.layout == .Packed) {
if (opt_sema) |sema| try sema.resolveTypeLayout(ty);
return try struct_type.backingIntType(ip).*.toType().bitSizeAdvanced(mod, opt_sema);
}
if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty);
assert(struct_obj.haveLayout());
return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
},
.anon_struct_type => {
if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
if (opt_sema) |sema| try sema.resolveTypeFields(ty);
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
},
.union_type => |union_type| {
if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
if (opt_sema) |sema| try sema.resolveTypeFields(ty);
if (ty.containerLayout(mod) != .Packed) {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
}
@@ -1749,13 +1692,7 @@ pub const Type = struct {
pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| {
return struct_obj.haveLayout();
} else {
return true;
}
},
.struct_type => |struct_type| struct_type.haveLayout(ip),
.union_type => |union_type| union_type.haveLayout(ip),
.array_type => |array_type| {
if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
@@ -2020,10 +1957,7 @@ pub const Type = struct {
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
return struct_obj.layout;
},
.struct_type => |struct_type| struct_type.layout,
.anon_struct_type => .Auto,
.union_type => |union_type| union_type.flagsPtr(ip).layout,
else => unreachable,
@@ -2136,10 +2070,7 @@ pub const Type = struct {
return switch (ip.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
.array_type => |array_type| array_type.len,
.struct_type => |struct_type| {
const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0;
return struct_obj.fields.count();
},
.struct_type => |struct_type| struct_type.field_types.len,
.anon_struct_type => |tuple| tuple.types.len,
else => unreachable,
@@ -2214,6 +2145,7 @@ pub const Type = struct {
/// Asserts the type is an integer, enum, error set, or vector of one of them.
pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType {
const ip = &mod.intern_pool;
const target = mod.getTarget();
var ty = starting_ty;
@@ -2233,13 +2165,9 @@ pub const Type = struct {
.c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
.c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
.c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type,
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.layout == .Packed);
ty = struct_obj.backing_int_ty;
},
.struct_type => |t| ty = t.backingIntType(ip).*.toType(),
.enum_type => |enum_type| ty = enum_type.tag_ty.toType(),
.vector_type => |vector_type| ty = vector_type.child.toType(),
@@ -2503,33 +2431,28 @@ pub const Type = struct {
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
if (mod.structPtrUnwrap(struct_type.index)) |s| {
assert(s.haveFieldTypes());
const field_vals = try mod.gpa.alloc(InternPool.Index, s.fields.count());
defer mod.gpa.free(field_vals);
for (field_vals, s.fields.values()) |*field_val, field| {
if (field.is_comptime) {
field_val.* = field.default_val;
continue;
}
if (try field.ty.onePossibleValue(mod)) |field_opv| {
field_val.* = try field_opv.intern(field.ty, mod);
} else return null;
assert(struct_type.haveFieldTypes(ip));
if (struct_type.knownNonOpv(ip))
return null;
const field_vals = try mod.gpa.alloc(InternPool.Index, struct_type.field_types.len);
defer mod.gpa.free(field_vals);
for (field_vals, 0..) |*field_val, i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_type.fieldIsComptime(ip, i)) {
field_val.* = struct_type.field_inits.get(ip)[i];
continue;
}
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} })).toValue();
const field_ty = struct_type.field_types.get(ip)[i].toType();
if (try field_ty.onePossibleValue(mod)) |field_opv| {
field_val.* = try field_opv.intern(field_ty, mod);
} else return null;
}
// In this case the struct has no fields at all and
// In this case the struct has no runtime-known fields and
// therefore has one possible value.
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = &.{} },
.storage = .{ .elems = field_vals },
} })).toValue();
},
@@ -2715,18 +2638,20 @@ pub const Type = struct {
=> true,
},
.struct_type => |struct_type| {
// packed structs cannot be comptime-only because they have a well-defined
// memory layout and every field has a well-defined bit pattern.
if (struct_type.layout == .Packed)
return false;
// A struct with no fields is not comptime-only.
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
switch (struct_obj.requires_comptime) {
.wip, .unknown => {
// Return false to avoid incorrect dependency loops.
// This will be handled correctly once merged with
// `Sema.typeRequiresComptime`.
return false;
},
.no => return false,
.yes => return true,
}
return switch (struct_type.flagsPtr(ip).requires_comptime) {
// Return false to avoid incorrect dependency loops.
// This will be handled correctly once merged with
// `Sema.typeRequiresComptime`.
.wip, .unknown => false,
.no => false,
.yes => true,
};
},
.anon_struct_type => |tuple| {
@@ -2982,37 +2907,31 @@ pub const Type = struct {
return enum_type.tagValueIndex(ip, int_tag);
}
pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields {
switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{};
assert(struct_obj.haveFieldTypes());
return struct_obj.fields;
},
else => unreachable,
}
}
pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
/// Returns none in the case of a tuple which uses the integer index as the field name.
pub fn structFieldName(ty: Type, field_index: u32, mod: *Module) InternPool.OptionalNullTerminatedString {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveFieldTypes());
return struct_obj.fields.keys()[field_index];
},
.anon_struct_type => |anon_struct| anon_struct.names.get(ip)[field_index],
.struct_type => |struct_type| struct_type.fieldName(ip, field_index),
.anon_struct_type => |anon_struct| anon_struct.fieldName(ip, field_index),
else => unreachable,
};
}
pub fn structFieldCount(ty: Type, mod: *Module) usize {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0;
assert(struct_obj.haveFieldTypes());
return struct_obj.fields.count();
},
/// When struct types have no field names, the names are implicitly understood to be
/// strings corresponding to the field indexes in declaration order. It used to be the
/// case that a NullTerminatedString would be stored for each field in this case, however,
/// now, callers must handle the possibility that there are no names stored at all.
/// Here we fake the previous behavior. Probably something better could be done by examining
/// all the callsites of this function.
pub fn legacyStructFieldName(ty: Type, i: u32, mod: *Module) InternPool.NullTerminatedString {
return ty.structFieldName(i, mod).unwrap() orelse
mod.intern_pool.getOrPutStringFmt(mod.gpa, "{d}", .{i}) catch @panic("OOM");
}
pub fn structFieldCount(ty: Type, mod: *Module) u32 {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.field_types.len,
.anon_struct_type => |anon_struct| anon_struct.types.len,
else => unreachable,
};
@@ -3022,11 +2941,7 @@ pub const Type = struct {
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveFieldTypes());
return struct_obj.fields.values()[index].ty;
},
.struct_type => |struct_type| struct_type.field_types.get(ip)[index].toType(),
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
return union_obj.field_types.get(ip)[index].toType();
@@ -3036,13 +2951,14 @@ pub const Type = struct {
};
}
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.layout != .Packed);
return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
assert(struct_type.layout != .Packed);
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = struct_type.field_types.get(ip)[index].toType();
return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
},
.anon_struct_type => |anon_struct| {
return anon_struct.types.get(ip)[index].toType().abiAlignment(mod);
@@ -3059,8 +2975,7 @@ pub const Type = struct {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const val = struct_obj.fields.values()[index].default_val;
const val = struct_type.fieldInit(ip, index);
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
return val.toValue();
@@ -3079,12 +2994,10 @@ pub const Type = struct {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
const field = struct_obj.fields.values()[index];
if (field.is_comptime) {
return field.default_val.toValue();
if (struct_type.fieldIsComptime(ip, index)) {
return struct_type.field_inits.get(ip)[index].toValue();
} else {
return field.ty.onePossibleValue(mod);
return struct_type.field_types.get(ip)[index].toType().onePossibleValue(mod);
}
},
.anon_struct_type => |tuple| {
@@ -3102,30 +3015,25 @@ pub const Type = struct {
pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
if (struct_obj.layout == .Packed) return false;
const field = struct_obj.fields.values()[index];
return field.is_comptime;
},
.struct_type => |struct_type| struct_type.fieldIsComptime(ip, index),
.anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
else => unreachable,
};
}
pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.layout == .Packed);
const ip = &mod.intern_pool;
const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
assert(struct_type.layout == .Packed);
comptime assert(Type.packed_struct_layout_version == 2);
var bit_offset: u16 = undefined;
var elem_size_bits: u16 = undefined;
var running_bits: u16 = 0;
for (struct_obj.fields.values(), 0..) |f, i| {
if (!f.ty.hasRuntimeBits(mod)) continue;
for (struct_type.field_types.get(ip), 0..) |field_ty, i| {
if (!field_ty.toType().hasRuntimeBits(mod)) continue;
const field_bits = @as(u16, @intCast(f.ty.bitSize(mod)));
const field_bits: u16 = @intCast(field_ty.toType().bitSize(mod));
if (i == field_index) {
bit_offset = running_bits;
elem_size_bits = field_bits;
@@ -3141,68 +3049,19 @@ pub const Type = struct {
offset: u64,
};
pub const StructOffsetIterator = struct {
field: usize = 0,
offset: u64 = 0,
big_align: u32 = 0,
struct_obj: *Module.Struct,
module: *Module,
pub fn next(it: *StructOffsetIterator) ?FieldOffset {
const mod = it.module;
var i = it.field;
if (it.struct_obj.fields.count() <= i)
return null;
if (it.struct_obj.optimized_order) |some| {
i = some[i];
if (i == Module.Struct.omitted_field) return null;
}
const field = it.struct_obj.fields.values()[i];
it.field += 1;
if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) {
return FieldOffset{ .field = i, .offset = it.offset };
}
const field_align = field.alignment(mod, it.struct_obj.layout);
it.big_align = @max(it.big_align, field_align);
const field_offset = std.mem.alignForward(u64, it.offset, field_align);
it.offset = field_offset + field.ty.abiSize(mod);
return FieldOffset{ .field = i, .offset = field_offset };
}
};
/// Get an iterator that iterates over all the struct field, returning the field and
/// offset of that field. Asserts that the type is a non-packed struct.
pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type;
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveLayout());
assert(struct_obj.layout != .Packed);
return .{ .struct_obj = struct_obj, .module = mod };
}
/// Supports structs and unions.
pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
assert(struct_obj.haveLayout());
assert(struct_obj.layout != .Packed);
var it = ty.iterateStructOffsets(mod);
while (it.next()) |field_offset| {
if (index == field_offset.field)
return field_offset.offset;
}
return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1));
assert(struct_type.haveLayout(ip));
assert(struct_type.layout != .Packed);
return struct_type.offsets.get(ip)[index];
},
.anon_struct_type => |tuple| {
var offset: u64 = 0;
var big_align: u32 = 0;
var big_align: Alignment = .none;
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, i| {
if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) {
@@ -3212,12 +3071,12 @@ pub const Type = struct {
}
const field_align = field_ty.toType().abiAlignment(mod);
big_align = @max(big_align, field_align);
offset = std.mem.alignForward(u64, offset, field_align);
big_align = big_align.max(field_align);
offset = field_align.forward(offset);
if (i == index) return offset;
offset += field_ty.toType().abiSize(mod);
}
offset = std.mem.alignForward(u64, offset, @max(big_align, 1));
offset = big_align.max(.@"1").forward(offset);
return offset;
},
@@ -3226,9 +3085,9 @@ pub const Type = struct {
return 0;
const union_obj = ip.loadUnionType(union_type);
const layout = mod.getUnionLayout(union_obj);
if (layout.tag_align >= layout.payload_align) {
if (layout.tag_align.compare(.gte, layout.payload_align)) {
// {Tag, Payload}
return std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
return layout.payload_align.forward(layout.tag_size);
} else {
// {Payload, Tag}
return 0;
@@ -3246,8 +3105,7 @@ pub const Type = struct {
pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
return struct_obj.srcLoc(mod);
return mod.declPtr(struct_type.decl.unwrap() orelse return null).srcLoc(mod);
},
.union_type => |union_type| {
return mod.declPtr(union_type.decl).srcLoc(mod);
@@ -3264,10 +3122,7 @@ pub const Type = struct {
pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
return struct_obj.owner_decl;
},
.struct_type => |struct_type| struct_type.decl.unwrap(),
.union_type => |union_type| union_type.decl,
.opaque_type => |opaque_type| opaque_type.decl,
.enum_type => |enum_type| enum_type.decl,
@@ -3280,10 +3135,12 @@ pub const Type = struct {
}
pub fn isTuple(ty: Type, mod: *Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
return struct_obj.is_tuple;
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
},
.anon_struct_type => |anon_struct| anon_struct.names.len == 0,
else => false,
@@ -3299,10 +3156,12 @@ pub const Type = struct {
}
pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
return struct_obj.is_tuple;
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
},
.anon_struct_type => true,
else => false,
@@ -3391,3 +3250,7 @@ pub const Type = struct {
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
};
fn cTypeAlign(target: Target, c_type: Target.CType) Alignment {
return Alignment.fromByteUnits(target.c_type_alignment(c_type));
}
+131 -115
View File
@@ -462,7 +462,7 @@ pub const Value = struct {
if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType());
const x = switch (int.storage) {
else => unreachable,
.lazy_align => ty.toType().abiAlignment(mod),
.lazy_align => ty.toType().abiAlignment(mod).toByteUnits(0),
.lazy_size => ty.toType().abiSize(mod),
};
return BigIntMutable.init(&space.limbs, x).toConst();
@@ -523,9 +523,9 @@ pub const Value = struct {
.u64 => |x| x,
.i64 => |x| std.math.cast(u64, x),
.lazy_align => |ty| if (opt_sema) |sema|
(try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar
(try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
else
ty.toType().abiAlignment(mod),
ty.toType().abiAlignment(mod).toByteUnits(0),
.lazy_size => |ty| if (opt_sema) |sema|
(try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar
else
@@ -569,9 +569,9 @@ pub const Value = struct {
.int => |int| switch (int.storage) {
.big_int => |big_int| big_int.to(i64) catch unreachable,
.i64 => |x| x,
.u64 => |x| @as(i64, @intCast(x)),
.lazy_align => |ty| @as(i64, @intCast(ty.toType().abiAlignment(mod))),
.lazy_size => |ty| @as(i64, @intCast(ty.toType().abiSize(mod))),
.u64 => |x| @intCast(x),
.lazy_align => |ty| @intCast(ty.toType().abiAlignment(mod).toByteUnits(0)),
.lazy_size => |ty| @intCast(ty.toType().abiSize(mod)),
},
else => unreachable,
},
@@ -612,10 +612,11 @@ pub const Value = struct {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef(mod)) {
const size = @as(usize, @intCast(ty.abiSize(mod)));
const size: usize = @intCast(ty.abiSize(mod));
@memset(buffer[0..size], 0xaa);
return;
}
const ip = &mod.intern_pool;
switch (ty.zigTypeTag(mod)) {
.Void => {},
.Bool => {
@@ -656,40 +657,44 @@ pub const Value = struct {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
.Struct => switch (ty.containerLayout(mod)) {
.Auto => return error.IllDefinedMemoryLayout,
.Extern => for (ty.structFields(mod).values(), 0..) |field, i| {
const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
const field_val = switch (val.ip_index) {
.none => switch (val.tag()) {
.bytes => {
buffer[off] = val.castTag(.bytes).?.data[i];
continue;
.Struct => {
const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
switch (struct_type.layout) {
.Auto => return error.IllDefinedMemoryLayout,
.Extern => for (0..struct_type.field_types.len) |i| {
const off: usize = @intCast(ty.structFieldOffset(i, mod));
const field_val = switch (val.ip_index) {
.none => switch (val.tag()) {
.bytes => {
buffer[off] = val.castTag(.bytes).?.data[i];
continue;
},
.aggregate => val.castTag(.aggregate).?.data[i],
.repeated => val.castTag(.repeated).?.data,
else => unreachable,
},
.aggregate => val.castTag(.aggregate).?.data[i],
.repeated => val.castTag(.repeated).?.data,
else => unreachable,
},
else => switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| {
buffer[off] = bytes[i];
continue;
},
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
}.toValue(),
};
try writeToMemory(field_val, field.ty, mod, buffer[off..]);
},
.Packed => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
else => switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| {
buffer[off] = bytes[i];
continue;
},
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
}.toValue(),
};
const field_ty = struct_type.field_types.get(ip)[i].toType();
try writeToMemory(field_val, field_ty, mod, buffer[off..]);
},
.Packed => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
}
},
.ErrorSet => {
// TODO revisit this when we have the concept of the error tag type
const Int = u16;
const name = switch (mod.intern_pool.indexToKey(val.toIntern())) {
const name = switch (ip.indexToKey(val.toIntern())) {
.err => |err| err.name,
.error_union => |error_union| error_union.val.err_name,
else => unreachable,
@@ -790,24 +795,24 @@ pub const Value = struct {
bits += elem_bit_size;
}
},
.Struct => switch (ty.containerLayout(mod)) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => unreachable, // Handled in non-packed writeToMemory
.Packed => {
var bits: u16 = 0;
const fields = ty.structFields(mod).values();
const storage = ip.indexToKey(val.toIntern()).aggregate.storage;
for (fields, 0..) |field, i| {
const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
const field_val = switch (storage) {
.bytes => unreachable,
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
};
try field_val.toValue().writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
},
.Struct => {
const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
// Sema is supposed to have emitted a compile error already in the case of Auto,
// and Extern is handled in non-packed writeToMemory.
assert(struct_type.layout == .Packed);
var bits: u16 = 0;
const storage = ip.indexToKey(val.toIntern()).aggregate.storage;
for (0..struct_type.field_types.len) |i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
const field_val = switch (storage) {
.bytes => unreachable,
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
};
try field_val.toValue().writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits);
bits += field_bits;
}
},
.Union => {
const union_obj = mod.typeToUnion(ty).?;
@@ -852,6 +857,7 @@ pub const Value = struct {
buffer: []const u8,
arena: Allocator,
) Allocator.Error!Value {
const ip = &mod.intern_pool;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
switch (ty.zigTypeTag(mod)) {
@@ -926,25 +932,29 @@ pub const Value = struct {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
.Struct => switch (ty.containerLayout(mod)) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => {
const fields = ty.structFields(mod).values();
const field_vals = try arena.alloc(InternPool.Index, fields.len);
for (field_vals, fields, 0..) |*field_val, field, i| {
const off = @as(usize, @intCast(ty.structFieldOffset(i, mod)));
const sz = @as(usize, @intCast(field.ty.abiSize(mod)));
field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod);
}
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} })).toValue();
},
.Packed => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
.Struct => {
const struct_type = mod.typeToStruct(ty).?;
switch (struct_type.layout) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => {
const field_types = struct_type.field_types;
const field_vals = try arena.alloc(InternPool.Index, field_types.len);
for (field_vals, 0..) |*field_val, i| {
const field_ty = field_types.get(ip)[i].toType();
const off: usize = @intCast(ty.structFieldOffset(i, mod));
const sz: usize = @intCast(field_ty.abiSize(mod));
field_val.* = try (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).intern(field_ty, mod);
}
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} })).toValue();
},
.Packed => {
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
},
}
},
.ErrorSet => {
// TODO revisit this when we have the concept of the error tag type
@@ -992,6 +1002,7 @@ pub const Value = struct {
bit_offset: usize,
arena: Allocator,
) Allocator.Error!Value {
const ip = &mod.intern_pool;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
switch (ty.zigTypeTag(mod)) {
@@ -1070,23 +1081,22 @@ pub const Value = struct {
.storage = .{ .elems = elems },
} })).toValue();
},
.Struct => switch (ty.containerLayout(mod)) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
.Extern => unreachable, // Handled by non-packed readFromMemory
.Packed => {
var bits: u16 = 0;
const fields = ty.structFields(mod).values();
const field_vals = try arena.alloc(InternPool.Index, fields.len);
for (fields, 0..) |field, i| {
const field_bits = @as(u16, @intCast(field.ty.bitSize(mod)));
field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod);
bits += field_bits;
}
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} })).toValue();
},
.Struct => {
// Sema is supposed to have emitted a compile error already for Auto layout structs,
// and Extern is handled by non-packed readFromMemory.
const struct_type = mod.typeToPackedStruct(ty).?;
var bits: u16 = 0;
const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
for (field_vals, 0..) |*field_val, i| {
const field_ty = struct_type.field_types.get(ip)[i].toType();
const field_bits: u16 = @intCast(field_ty.bitSize(mod));
field_val.* = try (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).intern(field_ty, mod);
bits += field_bits;
}
return (try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = field_vals },
} })).toValue();
},
.Pointer => {
assert(!ty.isSlice(mod)); // No well defined layout.
@@ -1105,18 +1115,18 @@ pub const Value = struct {
pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.int => |int| switch (int.storage) {
.big_int => |big_int| @as(T, @floatCast(bigIntToFloat(big_int.limbs, big_int.positive))),
.big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)),
inline .u64, .i64 => |x| {
if (T == f80) {
@panic("TODO we can't lower this properly on non-x86 llvm backend yet");
}
return @as(T, @floatFromInt(x));
return @floatFromInt(x);
},
.lazy_align => |ty| @as(T, @floatFromInt(ty.toType().abiAlignment(mod))),
.lazy_size => |ty| @as(T, @floatFromInt(ty.toType().abiSize(mod))),
.lazy_align => |ty| @floatFromInt(ty.toType().abiAlignment(mod).toByteUnits(0)),
.lazy_size => |ty| @floatFromInt(ty.toType().abiSize(mod)),
},
.float => |float| switch (float.storage) {
inline else => |x| @as(T, @floatCast(x)),
inline else => |x| @floatCast(x),
},
else => unreachable,
};
@@ -1255,7 +1265,8 @@ pub const Value = struct {
.int => |int| switch (int.storage) {
.big_int => |big_int| big_int.orderAgainstScalar(0),
inline .u64, .i64 => |x| std.math.order(x, 0),
.lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
.lazy_align => .gt, // alignment is never 0
.lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced(
mod,
false,
if (opt_sema) |sema| .{ .sema = sema } else .eager,
@@ -1510,33 +1521,38 @@ pub const Value = struct {
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
return (try val.maybeElemValue(mod, index)).?;
}
/// Like `elemValue`, but returns `null` instead of asserting on failure.
pub fn maybeElemValue(val: Value, mod: *Module, index: usize) Allocator.Error!?Value {
return switch (val.ip_index) {
.none => switch (val.tag()) {
.bytes => try mod.intValue(Type.u8, val.castTag(.bytes).?.data[index]),
.repeated => val.castTag(.repeated).?.data,
.aggregate => val.castTag(.aggregate).?.data[index],
.slice => val.castTag(.slice).?.data.ptr.elemValue(mod, index),
else => unreachable,
.slice => val.castTag(.slice).?.data.ptr.maybeElemValue(mod, index),
else => null,
},
else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
.undef => |ty| (try mod.intern(.{
.undef = ty.toType().elemType2(mod).toIntern(),
})).toValue(),
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| mod.declPtr(decl).val.elemValue(mod, index),
.decl => |decl| mod.declPtr(decl).val.maybeElemValue(mod, index),
.mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod))
.toValue().elemValue(mod, index),
.int, .eu_payload => unreachable,
.opt_payload => |base| base.toValue().elemValue(mod, index),
.comptime_field => |field_val| field_val.toValue().elemValue(mod, index),
.elem => |elem| elem.base.toValue().elemValue(mod, index + @as(usize, @intCast(elem.index))),
.toValue().maybeElemValue(mod, index),
.int, .eu_payload => null,
.opt_payload => |base| base.toValue().maybeElemValue(mod, index),
.comptime_field => |field_val| field_val.toValue().maybeElemValue(mod, index),
.elem => |elem| elem.base.toValue().maybeElemValue(mod, index + @as(usize, @intCast(elem.index))),
.field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| {
const base_decl = mod.declPtr(decl_index);
const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index)));
return field_val.elemValue(mod, index);
} else unreachable,
return field_val.maybeElemValue(mod, index);
} else null,
},
.opt => |opt| opt.val.toValue().elemValue(mod, index),
.opt => |opt| opt.val.toValue().maybeElemValue(mod, index),
.aggregate => |aggregate| {
const len = mod.intern_pool.aggregateTypeLen(aggregate.ty);
if (index < len) return switch (aggregate.storage) {
@@ -1550,7 +1566,7 @@ pub const Value = struct {
assert(index == len);
return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue();
},
else => unreachable,
else => null,
},
};
}
@@ -1875,9 +1891,9 @@ pub const Value = struct {
},
inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
.lazy_align => |ty| if (opt_sema) |sema| {
return floatFromIntInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
return floatFromIntInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
} else {
return floatFromIntInner(ty.toType().abiAlignment(mod), float_ty, mod);
return floatFromIntInner(ty.toType().abiAlignment(mod).toByteUnits(0), float_ty, mod);
},
.lazy_size => |ty| if (opt_sema) |sema| {
return floatFromIntInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
@@ -1892,11 +1908,11 @@ pub const Value = struct {
fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value {
const target = mod.getTarget();
const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) {
16 => .{ .f16 = @as(f16, @floatFromInt(x)) },
32 => .{ .f32 = @as(f32, @floatFromInt(x)) },
64 => .{ .f64 = @as(f64, @floatFromInt(x)) },
80 => .{ .f80 = @as(f80, @floatFromInt(x)) },
128 => .{ .f128 = @as(f128, @floatFromInt(x)) },
16 => .{ .f16 = @floatFromInt(x) },
32 => .{ .f32 = @floatFromInt(x) },
64 => .{ .f64 = @floatFromInt(x) },
80 => .{ .f80 = @floatFromInt(x) },
128 => .{ .f128 = @floatFromInt(x) },
else => unreachable,
};
return (try mod.intern(.{ .float = .{
+55
View File
@@ -619,3 +619,58 @@ test "sub-aligned pointer field access" {
.Little => try expect(x == 0x09080706),
}
}
test "alignment of zero-bit types is respected" {
if (true) return error.SkipZigTest; // TODO
const S = struct { arr: [0]usize = .{} };
comptime assert(@alignOf(void) == 1);
comptime assert(@alignOf(u0) == 1);
comptime assert(@alignOf([0]usize) == @alignOf(usize));
comptime assert(@alignOf(S) == @alignOf(usize));
var s: S = .{};
var v32: void align(32) = {};
var x32: u0 align(32) = 0;
var s32: S align(32) = .{};
var zero: usize = 0;
try expect(@intFromPtr(&s) % @alignOf(usize) == 0);
try expect(@intFromPtr(&s.arr) % @alignOf(usize) == 0);
try expect(@intFromPtr(s.arr[zero..zero].ptr) % @alignOf(usize) == 0);
try expect(@intFromPtr(&v32) % 32 == 0);
try expect(@intFromPtr(&x32) % 32 == 0);
try expect(@intFromPtr(&s32) % 32 == 0);
try expect(@intFromPtr(&s32.arr) % 32 == 0);
try expect(@intFromPtr(s32.arr[zero..zero].ptr) % 32 == 0);
}
test "zero-bit fields in extern struct pad fields appropriately" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const S = extern struct {
x: u8,
a: [0]u16 = .{},
y: u8,
};
// `a` should give `S` alignment 2, and pad the `arr` field.
comptime assert(@alignOf(S) == 2);
comptime assert(@sizeOf(S) == 4);
comptime assert(@offsetOf(S, "x") == 0);
comptime assert(@offsetOf(S, "a") == 2);
comptime assert(@offsetOf(S, "y") == 2);
var s: S = .{ .x = 100, .y = 200 };
try expect(@intFromPtr(&s) % 2 == 0);
try expect(@intFromPtr(&s.y) - @intFromPtr(&s.x) == 2);
try expect(@intFromPtr(&s.y) == @intFromPtr(&s.a));
try expect(@fieldParentPtr(S, "a", &s.a) == &s);
}
+7 -18
View File
@@ -18,24 +18,13 @@ test "@alignOf(T) before referencing T" {
}
test "comparison of @alignOf(T) against zero" {
{
const T = struct { x: u32 };
try expect(!(@alignOf(T) == 0));
try expect(@alignOf(T) != 0);
try expect(!(@alignOf(T) < 0));
try expect(!(@alignOf(T) <= 0));
try expect(@alignOf(T) > 0);
try expect(@alignOf(T) >= 0);
}
{
const T = struct {};
try expect(@alignOf(T) == 0);
try expect(!(@alignOf(T) != 0));
try expect(!(@alignOf(T) < 0));
try expect(@alignOf(T) <= 0);
try expect(!(@alignOf(T) > 0));
try expect(@alignOf(T) >= 0);
}
const T = struct { x: u32 };
try expect(!(@alignOf(T) == 0));
try expect(@alignOf(T) != 0);
try expect(!(@alignOf(T) < 0));
try expect(!(@alignOf(T) <= 0));
try expect(@alignOf(T) > 0);
try expect(@alignOf(T) >= 0);
}
test "correct alignment for elements and slices of aligned array" {
+1 -1
View File
@@ -37,7 +37,7 @@ test "switch on empty tagged union" {
test "empty union" {
const U = union {};
try expect(@sizeOf(U) == 0);
try expect(@alignOf(U) == 0);
try expect(@alignOf(U) == 1);
}
test "empty extern union" {