llvm: implement restricted type optimizations

Jacob Young
2026-04-21 06:37:24 -04:00
parent 53373c4c71
commit 221fb30b3c
12 changed files with 562 additions and 223 deletions
+301 -137
@@ -1284,7 +1284,7 @@ pub const Attribute = union(Kind) {
try w.print(" {t}(\"", .{attribute});
var any = false;
inline for (@typeInfo(AllocKind).@"struct".fields) |field| {
if (comptime std.mem.eql(u8, field.name, "_")) continue;
if (comptime std.mem.eql(u8, field.name, "unused")) continue;
if (@field(allockind, field.name)) {
if (!any) {
try w.writeByte(',');
@@ -1469,7 +1469,7 @@ pub const Attribute = union(Kind) {
positive_subnormal: bool = false,
positive_normal: bool = false,
positive_infinity: bool = false,
_: u22 = 0,
unused: enum(u22) { unused = 0 } = .unused,
pub const all = FpClass{
.signaling_nan = true,
@@ -1512,7 +1512,7 @@ pub const Attribute = union(Kind) {
uninitialized: bool,
zeroed: bool,
aligned: bool,
_: u26 = 0,
unused: enum(u26) { unused = 0 } = .unused,
};
pub const AllocSize = packed struct(u32) {
@@ -1533,7 +1533,7 @@ pub const Attribute = union(Kind) {
argmem: Effect = .none,
inaccessiblemem: Effect = .none,
other: Effect = .none,
_: u26 = 0,
unused: enum(u26) { unused = 0 } = .unused,
pub const Effect = enum(u2) { none, read, write, readwrite };
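
The recurring change above replaces anonymous padding (`_: uN = 0` or `_: uN = undefined`) with a single-valued enum field, so the padding bits have exactly one legal value and every packed word is fully defined. A standalone sketch of the idea (hypothetical `Flags` type, not from this commit):

const std = @import("std");

// Hypothetical packed word in the style of the Info structs above: the
// one-value enum pins the padding bits to defined zeros.
const Flags = packed struct(u32) {
    a: bool = false,
    b: bool = false,
    unused: enum(u30) { unused = 0 } = .unused,
};

test "padding bits round-trip through the storage word" {
    const flags: Flags = .{ .a = true };
    const word: u32 = @bitCast(flags);
    try std.testing.expectEqual(@as(u32, 1), word);
    try std.testing.expectEqual(flags, @as(Flags, @bitCast(word)));
}
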
@@ -1553,7 +1553,7 @@ pub const Attribute = union(Kind) {
pub const VScaleRange = packed struct(u32) {
min: Alignment,
max: Alignment,
_: u20 = 0,
unused: enum(u20) { unused = 0 } = .unused,
fn toLlvm(self: VScaleRange) packed struct(u64) { max: u32, min: u32 } {
return .{
@@ -1870,7 +1870,7 @@ pub const ThreadLocal = enum(u3) {
pub fn format(p: Prefixed, w: *Writer) Writer.Error!void {
switch (p.thread_local) {
.default => return,
.default => {},
.generaldynamic => {
var vecs: [2][]const u8 = .{ p.prefix, "thread_local" };
return w.writeVecAll(&vecs);
@@ -4221,7 +4221,6 @@ pub const Function = struct {
call,
@"call fast",
cmpxchg,
@"cmpxchg weak",
extractelement,
extractvalue,
fadd,
@@ -4619,9 +4618,7 @@ pub const Function = struct {
.@"tail call",
.@"tail call fast",
=> wip.extraData(Call, instruction.data).ty.functionReturn(wip.builder),
.cmpxchg,
.@"cmpxchg weak",
=> wip.builder.structTypeAssumeCapacity(.normal, &.{
.cmpxchg => wip.builder.structTypeAssumeCapacity(.normal, &.{
wip.extraData(CmpXchg, instruction.data).cmp.typeOfWip(wip),
.i1,
}),
@@ -4806,9 +4803,7 @@ pub const Function = struct {
.@"tail call",
.@"tail call fast",
=> function.extraData(Call, instruction.data).ty.functionReturn(builder),
.cmpxchg,
.@"cmpxchg weak",
=> builder.structTypeAssumeCapacity(.normal, &.{
.cmpxchg => builder.structTypeAssumeCapacity(.normal, &.{
function.extraData(CmpXchg, instruction.data)
.cmp.typeOf(function_index, builder),
.i1,
@@ -5027,33 +5022,86 @@ pub const Function = struct {
pub const Info = packed struct(u32) {
alignment: Alignment,
addr_space: AddrSpace,
_: u2 = undefined,
unused: enum(u2) { unused = 0 } = .unused,
};
};
pub const Load = struct {
info: MemoryAccessInfo,
info: Info,
type: Type,
ptr: Value,
//range: if (info.has_range) Metadata else void,
pub const Info = packed struct(u32) {
access_kind: MemoryAccessKind,
sync_scope: SyncScope,
ordering: AtomicOrdering,
alignment: Alignment,
has_range: bool,
unused: enum(u20) { unused = 0 } = .unused,
};
};
pub const Store = struct {
info: MemoryAccessInfo,
info: Info,
val: Value,
ptr: Value,
pub const Info = packed struct(u32) {
access_kind: MemoryAccessKind,
sync_scope: SyncScope,
ordering: AtomicOrdering,
alignment: Alignment,
unused: enum(u21) { unused = 0 } = .unused,
};
};
pub const CmpXchg = struct {
info: MemoryAccessInfo,
info: Info,
ptr: Value,
cmp: Value,
new: Value,
pub const Kind = enum { strong, weak };
pub const Kind = enum(u1) {
strong,
weak,
pub fn format(kind: Kind, w: *Writer) Writer.Error!void {
return Prefixed.format(.{ .kind = kind, .prefix = "" }, w);
}
pub const Prefixed = struct {
kind: Kind,
prefix: []const u8,
pub fn format(p: Prefixed, w: *Writer) Writer.Error!void {
switch (p.kind) {
.strong => {},
.weak => {
var vecs: [2][]const u8 = .{ p.prefix, "weak" };
try w.writeVecAll(&vecs);
},
}
}
};
pub fn fmt(kind: Kind, prefix: []const u8) Prefixed {
return .{ .kind = kind, .prefix = prefix };
}
};
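
The new `Kind` follows the same `Prefixed` formatting idiom used by `MemoryAccessKind`, `SyncScope`, and `AtomicOrdering` further down: the default variant prints nothing, so formatting with a `" "` prefix emits the keyword and its leading space only when one is needed. A standalone sketch of the idiom (simplified to `writeAll`; names assumed):

const std = @import("std");

const Kind = enum(u1) {
    strong,
    weak,

    const Prefixed = struct {
        kind: Kind,
        prefix: []const u8,
        pub fn format(p: Prefixed, w: *std.Io.Writer) std.Io.Writer.Error!void {
            switch (p.kind) {
                .strong => {}, // default variant: emit nothing, not even the prefix
                .weak => {
                    try w.writeAll(p.prefix);
                    try w.writeAll("weak");
                },
            }
        }
    };

    fn fmt(kind: Kind, prefix: []const u8) Prefixed {
        return .{ .kind = kind, .prefix = prefix };
    }
};

test "prefix appears only for the non-default variant" {
    var buf: [32]u8 = undefined;
    try std.testing.expectEqualStrings(
        "cmpxchg weak",
        try std.fmt.bufPrint(&buf, "cmpxchg{f}", .{Kind.weak.fmt(" ")}),
    );
    try std.testing.expectEqualStrings(
        "cmpxchg",
        try std.fmt.bufPrint(&buf, "cmpxchg{f}", .{Kind.strong.fmt(" ")}),
    );
}
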
pub const Info = packed struct(u32) {
kind: Kind,
access_kind: MemoryAccessKind,
sync_scope: SyncScope,
success_ordering: AtomicOrdering,
failure_ordering: AtomicOrdering,
alignment: Alignment,
unused: enum(u17) { unused = 0 } = .unused,
};
};
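
Dropping the separate `@"cmpxchg weak"` tag and moving strong/weak into a one-bit `kind` field of the packed info word halves the number of cmpxchg tags; the variant is now decoded from the instruction's generic `u32` data slot. A minimal sketch of that encoding (hypothetical stand-in for `Instruction.CmpXchg.Info`):

const std = @import("std");

const Info = packed struct(u32) {
    kind: enum(u1) { strong, weak },
    unused: enum(u31) { unused = 0 } = .unused,
};

test "variant decodes from the packed data word" {
    const data: u32 = @bitCast(Info{ .kind = .weak });
    const info: Info = @bitCast(data);
    try std.testing.expectEqual(.weak, info.kind);
}
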
pub const AtomicRmw = struct {
info: MemoryAccessInfo,
info: Info,
ptr: Value,
val: Value,
@@ -5073,8 +5121,21 @@ pub const Function = struct {
fsub = 12,
fmax = 13,
fmin = 14,
none = maxInt(u5),
};
pub const Info = packed struct(u32) {
access_kind: MemoryAccessKind,
operation: Operation,
sync_scope: SyncScope,
ordering: AtomicOrdering,
alignment: Alignment,
unused: enum(u16) { unused = 0 } = .unused,
};
};
pub const Fence = packed struct(u32) {
sync_scope: SyncScope,
success_ordering: AtomicOrdering,
unused: enum(u28) { unused = 0 } = .unused,
};
pub const GetElementPtr = struct {
@@ -5112,6 +5173,7 @@ pub const Function = struct {
callee: Value,
args_len: u32,
//args: [args_len]Value,
//callees: if (info.has_callees) Metadata else void,
pub const Kind = enum {
normal,
@@ -5125,8 +5187,9 @@ pub const Function = struct {
};
pub const Info = packed struct(u32) {
call_conv: CallConv,
has_callees: bool,
has_op_bundle_cold: bool,
_: u21 = undefined,
unused: enum(u20) { unused = 0 } = .unused,
};
};
@@ -5195,8 +5258,11 @@ pub const Function = struct {
Value,
Instruction.BrCond.Weights,
=> @enumFromInt(value),
MemoryAccessInfo,
Instruction.Alloca.Info,
Instruction.Load.Info,
Instruction.Store.Info,
Instruction.CmpXchg.Info,
Instruction.AtomicRmw.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
@@ -5706,6 +5772,10 @@ pub const WipFunction = struct {
return instruction.toValue();
}
pub const LoadMetadata = struct {
range: Metadata.Optional = .none,
};
pub fn load(
self: *WipFunction,
access_kind: MemoryAccessKind,
@@ -5714,7 +5784,19 @@ pub const WipFunction = struct {
alignment: Alignment,
name: []const u8,
) Allocator.Error!Value {
return self.loadAtomic(access_kind, ty, ptr, .system, .none, alignment, name);
return self.loadMetadata(access_kind, ty, ptr, alignment, .{}, name);
}
pub fn loadMetadata(
self: *WipFunction,
access_kind: MemoryAccessKind,
ty: Type,
ptr: Value,
alignment: Alignment,
metadata: LoadMetadata,
name: []const u8,
) Allocator.Error!Value {
return self.loadAtomicMetadata(access_kind, ty, ptr, .system, .none, alignment, metadata, name);
}
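
Zig has no default parameters, so the new `*Metadata` entry points keep the old signatures working through thin wrappers that forward an empty options struct. A toy sketch of the pattern (hypothetical names):

const std = @import("std");

// Every field has a default, so `.{}` means "no metadata requested".
const LoadOptions = struct {
    range: ?u32 = null,
};

// Old entry point, now a thin wrapper...
fn load(base: u32) u32 {
    return loadWithOptions(base, .{});
}

// ...around the new one that accepts the options struct.
fn loadWithOptions(base: u32, options: LoadOptions) u32 {
    return base + (options.range orelse 0);
}

test "wrapper forwards defaults" {
    try std.testing.expectEqual(@as(u32, 5), load(5));
    try std.testing.expectEqual(@as(u32, 7), loadWithOptions(5, .{ .range = 2 }));
}
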
pub fn loadAtomic(
@@ -5726,6 +5808,20 @@ pub const WipFunction = struct {
ordering: AtomicOrdering,
alignment: Alignment,
name: []const u8,
) Allocator.Error!Value {
return self.loadAtomicMetadata(access_kind, ty, ptr, sync_scope, ordering, alignment, .{}, name);
}
pub fn loadAtomicMetadata(
self: *WipFunction,
access_kind: MemoryAccessKind,
ty: Type,
ptr: Value,
sync_scope: SyncScope,
ordering: AtomicOrdering,
alignment: Alignment,
metadata: LoadMetadata,
name: []const u8,
) Allocator.Error!Value {
assert(ptr.typeOfWip(self).isPointer(self.builder));
try self.ensureUnusedExtraCapacity(1, Instruction.Load, 0);
@@ -5741,13 +5837,15 @@ pub const WipFunction = struct {
.none => .system,
else => sync_scope,
},
.success_ordering = ordering,
.ordering = ordering,
.alignment = alignment,
.has_range = !metadata.range.is_none,
},
.type = ty,
.ptr = ptr,
}),
});
if (metadata.range.unwrap()) |range| self.extra.appendAssumeCapacity(@bitCast(range));
return instruction.toValue();
}
@@ -5784,7 +5882,7 @@ pub const WipFunction = struct {
.none => .system,
else => sync_scope,
},
.success_ordering = ordering,
.ordering = ordering,
.alignment = alignment,
},
.val = val,
@@ -5803,7 +5901,7 @@ pub const WipFunction = struct {
try self.ensureUnusedExtraCapacity(1, NoExtra, 0);
const instruction = try self.addInst(null, .{
.tag = .fence,
.data = @bitCast(MemoryAccessInfo{
.data = @bitCast(Instruction.Fence{
.sync_scope = sync_scope,
.success_ordering = ordering,
}),
@@ -5833,12 +5931,10 @@ pub const WipFunction = struct {
_ = try self.builder.structType(.normal, &.{ ty, .i1 });
try self.ensureUnusedExtraCapacity(1, Instruction.CmpXchg, 0);
const instruction = try self.addInst(name, .{
.tag = switch (kind) {
.strong => .cmpxchg,
.weak => .@"cmpxchg weak",
},
.tag = .cmpxchg,
.data = self.addExtraAssumeCapacity(Instruction.CmpXchg{
.info = .{
.kind = kind,
.access_kind = access_kind,
.sync_scope = sync_scope,
.success_ordering = success_ordering,
@@ -5873,9 +5969,9 @@ pub const WipFunction = struct {
.data = self.addExtraAssumeCapacity(Instruction.AtomicRmw{
.info = .{
.access_kind = access_kind,
.atomic_rmw_operation = operation,
.operation = operation,
.sync_scope = sync_scope,
.success_ordering = ordering,
.ordering = ordering,
.alignment = alignment,
},
.ptr = ptr,
@@ -6079,6 +6175,11 @@ pub const WipFunction = struct {
}, cond, lhs, rhs, name);
}
pub const CallMetadata = struct {
callees: Metadata.Optional = .none,
has_op_bundle_cold: bool = false,
};
pub fn call(
self: *WipFunction,
kind: Instruction.Call.Kind,
@@ -6089,10 +6190,10 @@ pub const WipFunction = struct {
args: []const Value,
name: []const u8,
) Allocator.Error!Value {
return self.callInner(kind, call_conv, function_attributes, ty, callee, args, name, false);
return self.callMetadata(kind, call_conv, function_attributes, ty, callee, args, .{}, name);
}
fn callInner(
pub fn callMetadata(
self: *WipFunction,
kind: Instruction.Call.Kind,
call_conv: CallConv,
@@ -6100,8 +6201,8 @@ pub const WipFunction = struct {
ty: Type,
callee: Value,
args: []const Value,
metadata: CallMetadata,
name: []const u8,
has_op_bundle_cold: bool,
) Allocator.Error!Value {
const ret_ty = ty.functionReturn(self.builder);
assert(ty.isFunction(self.builder));
@@ -6109,7 +6210,8 @@ pub const WipFunction = struct {
const params = ty.functionParameters(self.builder);
for (params, args[0..params.len]) |param, arg_val| assert(param == arg_val.typeOfWip(self));
try self.ensureUnusedExtraCapacity(1, Instruction.Call, args.len);
try self.ensureUnusedExtraCapacity(1, Instruction.Call, args.len +
@intFromBool(!metadata.callees.is_none));
const instruction = try self.addInst(switch (ret_ty) {
.void => null,
else => name,
@@ -6127,7 +6229,8 @@ pub const WipFunction = struct {
.data = self.addExtraAssumeCapacity(Instruction.Call{
.info = .{
.call_conv = call_conv,
.has_op_bundle_cold = has_op_bundle_cold,
.has_callees = !metadata.callees.is_none,
.has_op_bundle_cold = metadata.has_op_bundle_cold,
},
.attributes = function_attributes,
.ty = ty,
@@ -6136,6 +6239,7 @@ pub const WipFunction = struct {
}),
});
self.extra.appendSliceAssumeCapacity(@ptrCast(args));
if (metadata.callees.unwrap()) |callees| self.extra.appendAssumeCapacity(@bitCast(callees));
return instruction.toValue();
}
@@ -6176,15 +6280,15 @@ pub const WipFunction = struct {
pub fn callIntrinsicAssumeCold(self: *WipFunction) Allocator.Error!Value {
const intrinsic = try self.builder.getIntrinsic(.assume, &.{});
return self.callInner(
return self.callMetadata(
.normal,
CallConv.default,
.none,
intrinsic.typeOf(self.builder),
intrinsic.toValue(self.builder),
&.{try self.builder.intValue(.i1, 1)},
"",
true,
.{ .has_op_bundle_cold = true },
undefined,
);
}
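
`has_callees` on calls and `has_range` on loads share one trailing-data scheme: the fixed extra fields come first, then the argument list, then an optional word whose presence is recorded as a flag bit in the fixed part. A self-contained sketch of that encoding over a plain `u32` buffer (hypothetical layout, not the Builder's actual field order):

const std = @import("std");

// Encode [args_len, has_callees] ++ args ++ optional trailing word; the
// flag in the fixed part tells the decoder whether the trail is present.
fn encode(gpa: std.mem.Allocator, buf: *std.ArrayList(u32), args: []const u32, callees: ?u32) !void {
    try buf.append(gpa, @intCast(args.len));
    try buf.append(gpa, @intFromBool(callees != null));
    try buf.appendSlice(gpa, args);
    if (callees) |c| try buf.append(gpa, c);
}

fn decodeCallees(buf: []const u32) ?u32 {
    const args_len = buf[0];
    const has_callees = buf[1] != 0;
    const trail = buf[2 + args_len ..];
    return if (has_callees) trail[0] else null;
}

test "optional trailing word round-trips" {
    const gpa = std.testing.allocator;
    var buf: std.ArrayList(u32) = .empty;
    defer buf.deinit(gpa);
    try encode(gpa, &buf, &.{ 10, 20 }, 42);
    try std.testing.expectEqual(@as(?u32, 42), decodeCallees(buf.items));
}
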
@@ -6355,8 +6459,11 @@ pub const WipFunction = struct {
Value,
Instruction.BrCond.Weights,
=> @intFromEnum(value),
MemoryAccessInfo,
Instruction.Alloca.Info,
Instruction.Load.Info,
Instruction.Store.Info,
Instruction.CmpXchg.Info,
Instruction.AtomicRmw.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
@@ -6648,6 +6755,7 @@ pub const WipFunction = struct {
=> {
var extra = self.extraDataTrail(Instruction.Call, instruction.data);
const args = extra.trail.next(extra.data.args_len, Value, self);
const callees = extra.trail.next(@intFromBool(extra.data.info.has_callees), Metadata, self);
instruction.data = wip_extra.addExtra(Instruction.Call{
.info = extra.data.info,
.attributes = extra.data.attributes,
@@ -6656,10 +6764,9 @@ pub const WipFunction = struct {
.args_len = extra.data.args_len,
});
wip_extra.appendMappedValues(args, instructions);
wip_extra.appendSlice(callees);
},
.cmpxchg,
.@"cmpxchg weak",
=> {
.cmpxchg => {
const extra = self.extraData(Instruction.CmpXchg, instruction.data);
instruction.data = wip_extra.addExtra(Instruction.CmpXchg{
.info = extra.info,
@@ -6730,12 +6837,14 @@ pub const WipFunction = struct {
.load,
.@"load atomic",
=> {
const extra = self.extraData(Instruction.Load, instruction.data);
var extra = self.extraDataTrail(Instruction.Load, instruction.data);
const range = extra.trail.next(@intFromBool(extra.data.info.has_range), Metadata, self);
instruction.data = wip_extra.addExtra(Instruction.Load{
.type = extra.type,
.ptr = instructions.map(extra.ptr),
.info = extra.info,
.type = extra.data.type,
.ptr = instructions.map(extra.data.ptr),
.info = extra.data.info,
});
wip_extra.appendSlice(range);
},
.phi,
.@"phi fast",
@@ -7011,8 +7120,11 @@ pub const WipFunction = struct {
Value,
Instruction.BrCond.Weights,
=> @intFromEnum(value),
MemoryAccessInfo,
Instruction.Alloca.Info,
Instruction.Load.Info,
Instruction.Store.Info,
Instruction.CmpXchg.Info,
Instruction.AtomicRmw.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
@@ -7060,8 +7172,11 @@ pub const WipFunction = struct {
Value,
Instruction.BrCond.Weights,
=> @enumFromInt(value),
MemoryAccessInfo,
Instruction.Alloca.Info,
Instruction.Load.Info,
Instruction.Store.Info,
Instruction.CmpXchg.Info,
Instruction.AtomicRmw.Info,
Instruction.Call.Info,
=> @bitCast(value),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
@@ -7121,10 +7236,10 @@ pub const MemoryAccessKind = enum(u1) {
pub fn format(p: Prefixed, w: *Writer) Writer.Error!void {
switch (p.memory_access_kind) {
.normal => return,
.normal => {},
.@"volatile" => {
var vecs: [2][]const u8 = .{ p.prefix, "volatile" };
return w.writeVecAll(&vecs);
try w.writeVecAll(&vecs);
},
}
}
@@ -7149,10 +7264,10 @@ pub const SyncScope = enum(u1) {
pub fn format(p: Prefixed, w: *Writer) Writer.Error!void {
switch (p.sync_scope) {
.system => return,
.system => {},
.singlethread => {
var vecs: [2][]const u8 = .{ p.prefix, "syncscope(\"singlethread\")" };
return w.writeVecAll(&vecs);
try w.writeVecAll(&vecs);
},
}
}
@@ -7182,10 +7297,10 @@ pub const AtomicOrdering = enum(u3) {
pub fn format(p: Prefixed, w: *Writer) Writer.Error!void {
switch (p.atomic_ordering) {
.none => return,
.none => {},
else => {
var vecs: [2][]const u8 = .{ p.prefix, @tagName(p.atomic_ordering) };
return w.writeVecAll(&vecs);
try w.writeVecAll(&vecs);
},
}
}
@@ -7196,16 +7311,6 @@ pub const AtomicOrdering = enum(u3) {
}
};
const MemoryAccessInfo = packed struct(u32) {
access_kind: MemoryAccessKind = .normal,
atomic_rmw_operation: Function.Instruction.AtomicRmw.Operation = .none,
sync_scope: SyncScope,
success_ordering: AtomicOrdering,
failure_ordering: AtomicOrdering = .none,
alignment: Alignment = .default,
_: u13 = undefined,
};
pub const FastMath = packed struct(u8) {
unsafe_algebra: bool = false, // Legacy
nnan: bool = false,
@@ -7533,7 +7638,7 @@ pub const Constant = enum(u32) {
const item = builder.constant_items.get(constant);
return switch (item.tag) {
.positive_integer => {
const extra: *align(@alignOf(std.math.big.Limb)) Integer =
const extra: *align(@alignOf(std.math.big.Limb)) const Integer =
@ptrCast(builder.constant_limbs.items[item.data..][0..Integer.limbs]);
const limbs = builder.constant_limbs
.items[item.data + Integer.limbs ..][0..extra.limbs_len];
@@ -7568,6 +7673,21 @@ pub const Constant = enum(u32) {
}
}
pub fn toInt(self: Constant, builder: *const Builder) ?std.math.big.int.Const {
const item = builder.constant_items.get(self.unwrap().constant);
switch (item.tag) {
.positive_integer, .negative_integer => {
const extra: *align(@alignOf(std.math.big.Limb)) const Integer =
@ptrCast(builder.constant_limbs.items[item.data..][0..Integer.limbs]);
return .{
.positive = item.tag == .positive_integer,
.limbs = builder.constant_limbs.items[item.data + Integer.limbs ..][0..extra.limbs_len],
};
},
else => return null,
}
}
pub fn getBase(self: Constant, builder: *const Builder) Global.Index {
var cur = self;
while (true) switch (cur.unwrap()) {
@@ -8183,7 +8303,7 @@ pub const Metadata = packed struct(u32) {
};
pub const DIFlags = packed struct(u32) {
Visibility: enum(u2) { Zero, Private, Protected, Public } = .Zero,
Visibility: enum(u2) { None, Private, Protected, Public } = .None,
FwdDecl: bool = false,
AppleBlock: bool = false,
ReservedBit4: u1 = 0,
@@ -8199,11 +8319,11 @@ pub const Metadata = packed struct(u32) {
RValueReference: bool = false,
ExportSymbols: bool = false,
Inheritance: enum(u2) {
Zero,
None,
SingleInheritance,
MultipleInheritance,
VirtualInheritance,
} = .Zero,
} = .None,
IntroducedVirtual: bool = false,
BitField: bool = false,
NoReturn: bool = false,
@@ -8226,7 +8346,7 @@ pub const Metadata = packed struct(u32) {
if (need_pipe) try w.writeAll(" | ") else need_pipe = true;
try w.print("DIFlag{s}", .{field.name});
},
.@"enum" => if (@field(self, field.name) != .Zero) {
.@"enum" => if (@field(self, field.name) != .None) {
if (need_pipe) try w.writeAll(" | ") else need_pipe = true;
try w.print("DIFlag{s}", .{@tagName(@field(self, field.name))});
},
@@ -8262,7 +8382,7 @@ pub const Metadata = packed struct(u32) {
};
pub const DISPFlags = packed struct(u32) {
Virtuality: enum(u2) { Zero, Virtual, PureVirtual } = .Zero,
Virtuality: enum(u2) { None, Virtual, PureVirtual } = .None,
LocalToUnit: bool = false,
Definition: bool = false,
Optimized: bool = false,
@@ -8283,7 +8403,7 @@ pub const Metadata = packed struct(u32) {
if (need_pipe) try w.writeAll(" | ") else need_pipe = true;
try w.print("DISPFlag{s}", .{field.name});
},
.@"enum" => if (@field(self, field.name) != .Zero) {
.@"enum" => if (@field(self, field.name) != .None) {
if (need_pipe) try w.writeAll(" | ") else need_pipe = true;
try w.print("DISPFlag{s}", .{@tagName(@field(self, field.name))});
},
@@ -10007,11 +10127,11 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
instruction_index.name(&function).fmt(self),
tag,
extra.info.access_kind.fmt(" "),
extra.info.atomic_rmw_operation,
extra.info.operation,
extra.ptr.fmt(function_index, self, .{ .percent = true }),
extra.val.fmt(function_index, self, .{ .percent = true }),
extra.info.sync_scope.fmt(" "),
extra.info.success_ordering.fmt(" "),
extra.info.ordering.fmt(" "),
extra.info.alignment.fmt(", "),
});
},
@@ -10055,9 +10175,10 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
.@"tail call",
.@"tail call fast",
=> |tag| {
var extra =
function.extraDataTrail(Function.Instruction.Call, instruction.data);
var extra = function.extraDataTrail(Function.Instruction.Call, instruction.data);
const args = extra.trail.next(extra.data.args_len, Value, &function);
const callees =
extra.trail.next(@intFromBool(extra.data.info.has_callees), Metadata, &function);
try w.writeAll(" ");
const ret_ty = extra.data.ty.functionReturn(self);
switch (ret_ty) {
@@ -10089,9 +10210,6 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
});
}
try w.writeByte(')');
if (extra.data.info.has_op_bundle_cold) {
try w.writeAll(" [ \"cold\"() ]");
}
const call_function_attributes = extra.data.attributes.func(self);
if (call_function_attributes != .none) try w.print(" #{d}", .{
(try attribute_groups.getOrPutValue(
@@ -10100,15 +10218,19 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
{},
)).index,
});
if (extra.data.info.has_op_bundle_cold) try w.writeAll(" [ \"cold\"() ]");
metadata_formatter.need_comma = true;
defer metadata_formatter.need_comma = undefined;
for (callees) |metadata| try w.print("{f}", .{
try metadata_formatter.fmt("!callees ", metadata, null),
});
},
.cmpxchg,
.@"cmpxchg weak",
=> |tag| {
const extra =
function.extraData(Function.Instruction.CmpXchg, instruction.data);
try w.print(" %{f} = {t}{f} {f}, {f}, {f}{f}{f}{f}{f}", .{
.cmpxchg => |tag| {
const extra = function.extraData(Function.Instruction.CmpXchg, instruction.data);
try w.print(" %{f} = {t}{f}{f} {f}, {f}, {f}{f}{f}{f}{f}", .{
instruction_index.name(&function).fmt(self),
tag,
extra.info.kind.fmt(" "),
extra.info.access_kind.fmt(" "),
extra.ptr.fmt(function_index, self, .{ .percent = true }),
extra.cmp.fmt(function_index, self, .{ .percent = true }),
@@ -10143,11 +10265,11 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
for (indices) |index| try w.print(", {d}", .{index});
},
.fence => |tag| {
const info: MemoryAccessInfo = @bitCast(instruction.data);
const fence: Function.Instruction.Fence = @bitCast(instruction.data);
try w.print(" {t}{f}{f}", .{
tag,
info.sync_scope.fmt(" "),
info.success_ordering.fmt(" "),
fence.sync_scope.fmt(" "),
fence.success_ordering.fmt(" "),
});
},
.fneg,
@@ -10221,16 +10343,22 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
.load,
.@"load atomic",
=> |tag| {
const extra = function.extraData(Function.Instruction.Load, instruction.data);
var extra = function.extraDataTrail(Function.Instruction.Load, instruction.data);
const range = extra.trail.next(@intFromBool(extra.data.info.has_range), Metadata, &function);
try w.print(" %{f} = {t}{f} {f}, {f}{f}{f}{f}", .{
instruction_index.name(&function).fmt(self),
tag,
extra.info.access_kind.fmt(" "),
extra.type.fmt(self, .percent),
extra.ptr.fmt(function_index, self, .{ .percent = true }),
extra.info.sync_scope.fmt(" "),
extra.info.success_ordering.fmt(" "),
extra.info.alignment.fmt(", "),
extra.data.info.access_kind.fmt(" "),
extra.data.type.fmt(self, .percent),
extra.data.ptr.fmt(function_index, self, .{ .percent = true }),
extra.data.info.sync_scope.fmt(" "),
extra.data.info.ordering.fmt(" "),
extra.data.info.alignment.fmt(", "),
});
metadata_formatter.need_comma = true;
defer metadata_formatter.need_comma = undefined;
for (range) |metadata| if (metadata.unwrap(self) != Metadata.empty_tuple) try w.print("{f}", .{
try metadata_formatter.fmt("!range ", metadata, null),
});
},
.phi,
@@ -10296,7 +10424,7 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
extra.val.fmt(function_index, self, .{ .percent = true }),
extra.ptr.fmt(function_index, self, .{ .percent = true }),
extra.info.sync_scope.fmt(" "),
extra.info.success_ordering.fmt(" "),
extra.info.ordering.fmt(" "),
extra.info.alignment.fmt(", "),
});
},
@@ -12291,9 +12419,12 @@ pub fn debugFloatType(
return self.debugFloatTypeAssumeCapacity(name, size_in_bits);
}
pub fn debugForwardReference(self: *Builder) Allocator.Error!Metadata {
/// Deprecated, use `metadataForwardReference`.
pub const debugForwardReference = metadataForwardReference;
pub fn metadataForwardReference(self: *Builder) Allocator.Error!Metadata {
try self.metadata_forward_references.ensureUnusedCapacity(self.gpa, 1);
return self.debugForwardReferenceAssumeCapacity();
return self.metadataForwardReferenceAssumeCapacity();
}
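
The `debug*` to `metadata*` renames preserve source compatibility with declaration aliases, the same device used for `resolveDebugForwardReference` below. A minimal sketch:

const std = @import("std");

fn newName(x: u32) u32 {
    return x + 1;
}

/// Deprecated, use `newName`. The alias is the same function value, so
/// existing call sites keep compiling with no wrapper overhead.
const oldName = newName;

test "alias and original behave identically" {
    try std.testing.expectEqual(newName(1), oldName(1));
}
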
pub fn debugStructType(
@@ -12521,15 +12652,8 @@ pub fn debugExpression(self: *Builder, elements: []const u32) Allocator.Error!Me
}
pub fn metadataTuple(self: *Builder, elements: []const Metadata) Allocator.Error!Metadata {
return self.metadataTupleOptionals(@ptrCast(elements));
}
pub fn metadataTupleOptionals(
self: *Builder,
elements: []const Metadata.Optional,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.Tuple, elements.len);
return self.metadataTupleOptionalsAssumeCapacity(elements);
return self.metadataTupleAssumeCapacity(elements);
}
pub fn debugLocalVar(
@@ -12595,9 +12719,12 @@ pub fn metadataConstant(self: *Builder, value: Constant) Allocator.Error!Metadat
return self.metadataConstantAssumeCapacity(value);
}
/// Deprecated, use `resolveMetadataForwardReference`.
pub const resolveDebugForwardReference = resolveMetadataForwardReference;
/// Resolves the given forward reference to the given value (which is not itself a forward
/// reference). If the forward reference is already resolved, its target is replaced.
pub fn resolveDebugForwardReference(self: *Builder, fwd_ref: Metadata, value: Metadata) void {
pub fn resolveMetadataForwardReference(self: *Builder, fwd_ref: Metadata, value: Metadata) void {
assert(fwd_ref.kind == .forward);
assert(value.kind != .forward);
self.metadata_forward_references.items[fwd_ref.index] = value.toOptional();
@@ -12790,8 +12917,7 @@ fn debugFloatTypeAssumeCapacity(self: *Builder, name: ?Metadata.String, size_in_
});
}
fn debugForwardReferenceAssumeCapacity(self: *Builder) Metadata {
assert(!self.strip);
fn metadataForwardReferenceAssumeCapacity(self: *Builder) Metadata {
const index = self.metadata_forward_references.items.len;
self.metadata_forward_references.appendAssumeCapacity(.none);
return .{ .index = @intCast(index), .kind = .forward };
@@ -13164,9 +13290,9 @@ fn debugExpressionAssumeCapacity(self: *Builder, elements: []const u32) Metadata
return .{ .index = @intCast(gop.index), .kind = .node };
}
fn metadataTupleOptionalsAssumeCapacity(self: *Builder, elements: []const Metadata.Optional) Metadata {
fn metadataTupleAssumeCapacity(self: *Builder, elements: []const Metadata) Metadata {
const Key = struct {
elements: []const Metadata.Optional,
elements: []const Metadata,
};
const Adapter = struct {
builder: *const Builder,
@@ -13181,9 +13307,9 @@ fn metadataTupleOptionalsAssumeCapacity(self: *Builder, elements: []const Metada
const rhs_data = ctx.builder.metadata_items.items(.data)[rhs_index];
var rhs_extra = ctx.builder.metadataExtraDataTrail(Metadata.Tuple, rhs_data);
return std.mem.eql(
Metadata.Optional,
Metadata,
lhs_key.elements,
rhs_extra.trail.next(rhs_extra.data.elements_len, Metadata.Optional, ctx.builder),
rhs_extra.trail.next(rhs_extra.data.elements_len, Metadata, ctx.builder),
);
}
};
@@ -13949,7 +14075,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator, producer: Producer) bitco
.positive_integer,
.negative_integer,
=> |tag| {
const extra: *align(@alignOf(std.math.big.Limb)) Constant.Integer =
const extra: *align(@alignOf(std.math.big.Limb)) const Constant.Integer =
@ptrCast(self.constant_limbs.items[data..][0..Constant.Integer.limbs]);
const bigint: std.math.big.int.Const = .{
.limbs = self.constant_limbs
@@ -15109,7 +15235,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator, producer: Producer) bitco
.ty = extra.type,
.alignment = extra.info.alignment.toLlvm(),
.is_volatile = extra.info.access_kind == .@"volatile",
.success_ordering = extra.info.success_ordering,
.ordering = extra.info.ordering,
.sync_scope = extra.info.sync_scope,
});
},
@@ -15129,7 +15255,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator, producer: Producer) bitco
.val = adapter.getOffsetValueIndex(extra.val),
.alignment = extra.info.alignment.toLlvm(),
.is_volatile = extra.info.access_kind == .@"volatile",
.success_ordering = extra.info.success_ordering,
.ordering = extra.info.ordering,
.sync_scope = extra.info.sync_scope,
});
},
@@ -15212,18 +15338,15 @@ pub fn toBitcode(self: *Builder, allocator: Allocator, producer: Producer) bitco
try function_block.writeAbbrev(FunctionBlock.AtomicRmw{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.val = adapter.getOffsetValueIndex(extra.val),
.operation = extra.info.atomic_rmw_operation,
.operation = extra.info.operation,
.is_volatile = extra.info.access_kind == .@"volatile",
.success_ordering = extra.info.success_ordering,
.ordering = extra.info.ordering,
.sync_scope = extra.info.sync_scope,
.alignment = extra.info.alignment.toLlvm(),
});
},
.cmpxchg,
.@"cmpxchg weak",
=> |kind| {
.cmpxchg => {
const extra = func.extraData(Function.Instruction.CmpXchg, data);
try function_block.writeAbbrev(FunctionBlock.CmpXchg{
.ptr = adapter.getOffsetValueIndex(extra.ptr),
.cmp = adapter.getOffsetValueIndex(extra.cmp),
@@ -15232,15 +15355,15 @@ pub fn toBitcode(self: *Builder, allocator: Allocator, producer: Producer) bitco
.success_ordering = extra.info.success_ordering,
.sync_scope = extra.info.sync_scope,
.failure_ordering = extra.info.failure_ordering,
.is_weak = kind == .@"cmpxchg weak",
.is_weak = extra.info.kind == .weak,
.alignment = extra.info.alignment.toLlvm(),
});
},
.fence => {
const info: MemoryAccessInfo = @bitCast(data);
const fence: Function.Instruction.Fence = @bitCast(data);
try function_block.writeAbbrev(FunctionBlock.Fence{
.ordering = info.success_ordering,
.sync_scope = info.sync_scope,
.ordering = fence.success_ordering,
.sync_scope = fence.sync_scope,
});
},
}
@@ -15313,19 +15436,60 @@ pub fn toBitcode(self: *Builder, allocator: Allocator, producer: Producer) bitco
};
switch (weights) {
.none => {},
.unpredictable => try metadata_attach_block.writeAbbrevAdapted(MetadataAttachmentBlock.AttachmentInstructionSingle{
.inst = instr_index,
.kind = .unpredictable,
.metadata = .empty_tuple,
}, metadata_adapter),
_ => try metadata_attach_block.writeAbbrevAdapted(MetadataAttachmentBlock.AttachmentInstructionSingle{
.inst = instr_index,
.kind = .prof,
.metadata = weights.toMetadata(),
}, metadata_adapter),
.unpredictable => try metadata_attach_block.writeAbbrevAdapted(
MetadataAttachmentBlock.AttachmentInstructionSingle{
.inst = instr_index,
.kind = .unpredictable,
.metadata = .empty_tuple,
},
metadata_adapter,
),
_ => try metadata_attach_block.writeAbbrevAdapted(
MetadataAttachmentBlock.AttachmentInstructionSingle{
.inst = instr_index,
.kind = .prof,
.metadata = weights.toMetadata(),
},
metadata_adapter,
),
}
instr_index += 1;
},
.call,
.@"call fast",
.@"musttail call",
.@"musttail call fast",
.@"notail call",
.@"notail call fast",
.@"tail call",
.@"tail call fast",
=> {
var extra = func.extraDataTrail(Function.Instruction.Call, data);
_ = extra.trail.next(extra.data.args_len, Value, &func);
const callees = extra.trail.next(@intFromBool(extra.data.info.has_callees), Metadata, &func);
for (callees) |metadata| try metadata_attach_block.writeAbbrevAdapted(
MetadataAttachmentBlock.AttachmentInstructionSingle{
.inst = instr_index,
.kind = .callees,
.metadata = metadata,
},
metadata_adapter,
);
instr_index += 1;
},
.load, .@"load atomic" => {
var extra = func.extraDataTrail(Function.Instruction.Load, data);
const range = extra.trail.next(@intFromBool(extra.data.info.has_range), Metadata, &func);
for (range) |metadata| if (metadata.unwrap(self) != Metadata.empty_tuple) try metadata_attach_block.writeAbbrevAdapted(
MetadataAttachmentBlock.AttachmentInstructionSingle{
.inst = instr_index,
.kind = .range,
.metadata = metadata,
},
metadata_adapter,
);
instr_index += 1;
},
};
try metadata_attach_block.end();
+5 -5
@@ -79,7 +79,7 @@ pub const FixedMetadataKind = enum(u6) {
//tbaa = 1,
prof = 2,
//fpmath = 3,
//range = 4,
range = 4,
//@"tbaa.struct" = 5,
//@"invariant.load" = 6,
//@"alias.scope" = 7,
@@ -98,7 +98,7 @@ pub const FixedMetadataKind = enum(u6) {
//section_prefix = 20,
//absolute_symbol = 21,
//associated = 22,
//callees = 23,
callees = 23,
//irr_loop = 24,
//@"llvm.access.group" = 25,
//callback = 26,
@@ -1232,7 +1232,7 @@ pub const ModuleBlock = struct {
ty: Builder.Type,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
is_volatile: bool,
success_ordering: Builder.AtomicOrdering,
ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
};
@@ -1264,7 +1264,7 @@ pub const ModuleBlock = struct {
val: u32,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
is_volatile: bool,
success_ordering: Builder.AtomicOrdering,
ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
};
@@ -1315,7 +1315,7 @@ pub const ModuleBlock = struct {
val: u32,
operation: Builder.Function.Instruction.AtomicRmw.Operation,
is_volatile: bool,
success_ordering: Builder.AtomicOrdering,
ordering: Builder.AtomicOrdering,
sync_scope: Builder.SyncScope,
alignment: std.meta.Int(.unsigned, @bitSizeOf(Builder.Alignment)),
};
+5 -2
@@ -7998,7 +7998,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
});
},
.restricted_value => |restricted_value| {
assert(restricted_value.ty.unwrap(ip).getTag(ip) == .type_restricted);
assert(ip.isRestrictedType(restricted_value.ty));
assert(!ip.isUndef(restricted_value.unrestricted_value));
items.appendAssumeCapacity(.{
.tag = .restricted_value,
@@ -9019,7 +9019,10 @@ pub fn getUnion(
) Allocator.Error!Index {
assert(un.ty != .none);
assert(un.val != .none);
assert(ip.loadUnionType(un.ty).layout != .@"packed");
const loaded_union = ip.loadUnionType(un.ty);
assert(loaded_union.layout != .@"packed");
assert(loaded_union.enum_tag_type == ip.typeOf(un.tag));
var gop = try ip.getOrPutKey(gpa, io, tid, .{ .un = un });
defer gop.deinit();
+41 -18
@@ -10053,6 +10053,7 @@ fn analyzeSwitchBlock(
) CompileError!?Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = sema.gpa;
const src_node_offset = zir_switch.switch_src_node_offset;
@@ -10076,7 +10077,13 @@ fn analyzeSwitchBlock(
operand_ty.containerLayout(zcu) != .@"packed")
{
const tag_val = try sema.unionToTag(block, val);
break :init .{ tag_val, sema.typeOf(tag_val) };
const tag_ty = sema.typeOf(tag_val);
const unrestricted_tag_ty = tag_ty.unrestrictedType(zcu) orelse tag_ty;
const unrestricted_tag_val = if (unrestricted_tag_ty.toIntern() != tag_ty.toIntern())
try sema.unwrapRestricted(block, unrestricted_tag_ty, tag_val, src)
else
tag_val;
break :init .{ unrestricted_tag_val, unrestricted_tag_ty };
}
break :init .{
if (maybe_operand_opv) |operand_opv| .fromValue(operand_opv) else val,
@@ -10269,7 +10276,7 @@ fn analyzeSwitchBlock(
break :item_val item_opv;
}
if (maybe_operand_opv) |operand_opv| {
break :item_val .fromInterned(zcu.intern_pool.indexToKey(operand_opv.toIntern()).un.val);
break :item_val .fromInterned(ip.indexToKey(operand_opv.toIntern()).un.val);
}
assert(zir_switch.any_maybe_runtime_capture); // there's a payload capture
const operand_val, const operand_ref = switch (operand) {
@@ -11211,7 +11218,8 @@ fn validateSwitchBlock(
const union_obj = ip.loadUnionType(operand_ty.toIntern());
switch (union_obj.tag_usage) {
.tagged => {
break :item_ty .fromInterned(union_obj.enum_tag_type);
const enum_tag_ty: Type = .fromInterned(union_obj.enum_tag_type);
break :item_ty enum_tag_ty.unrestrictedType(zcu) orelse enum_tag_ty;
},
.none => {
if (union_obj.layout == .@"packed") {
@@ -16636,7 +16644,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const union_field_ty = try sema.getBuiltinType(src, .@"Type.UnionField");
const union_obj = ip.loadUnionType(unrestricted_ty.toIntern());
const enum_obj = ip.loadEnumType(union_obj.enum_tag_type);
const enum_tag_ty: Type = .fromInterned(union_obj.enum_tag_type);
const enum_obj = ip.loadEnumType((enum_tag_ty.unrestrictedType(zcu) orelse enum_tag_ty).toIntern());
const layout = union_obj.layout;
const union_field_vals = try gpa.alloc(InternPool.Index, enum_obj.field_names.len);
@@ -18220,7 +18229,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
break :blk ty;
};
if (elem_ty.zigTypeTag(zcu) == .noreturn)
if (elem_ty.toIntern() == .noreturn_type)
return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});
const target = zcu.getTarget();
@@ -18253,7 +18262,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
break :blk try sema.resolveAddressSpace(block, addrspace_src, ref, .pointer);
} else if (elem_ty.zigTypeTag(zcu) == .@"fn" and target.cpu.arch == .avr) .flash else .generic;
} else if (target.cpu.arch == .avr and ip.isFunctionType(elem_ty.toIntern())) .flash else .generic;
const bit_offset: u16 = if (inst_data.flags.has_bit_range) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
@@ -18284,7 +18293,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
}
}
if (elem_ty.zigTypeTag(zcu) == .@"fn") {
if (ip.isFunctionType(elem_ty.toIntern())) {
if (inst_data.size != .one) {
return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
}
@@ -18594,7 +18603,8 @@ fn zirStructInit(
);
const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
const tag_ty = resolved_ty.unionTagTypeHypothetical(zcu);
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const unrestricted_tag_ty = tag_ty.unrestrictedType(zcu) orelse tag_ty;
const tag_val = try pt.enumValueFieldIndex(unrestricted_tag_ty, field_index);
const field_ty: Type = .fromInterned(zcu.typeToUnion(resolved_ty).?.field_types.get(ip)[field_index]);
if (field_ty.classify(zcu) == .no_possible_value) {
@@ -18626,7 +18636,10 @@ fn zirStructInit(
if (sema.resolveValue(init_inst)) |val| {
const struct_val = Value.fromInterned(try pt.internUnion(.{
.ty = resolved_ty.toIntern(),
.tag = tag_val.toIntern(),
.tag = if (unrestricted_tag_ty.toIntern() != tag_ty.toIntern())
try pt.intern(.{ .restricted_value = .{ .ty = tag_ty.toIntern(), .unrestricted_value = tag_val.toIntern() } })
else
tag_val.toIntern(),
.val = val.toIntern(),
}));
const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), src);
@@ -19358,7 +19371,8 @@ fn fieldType(
},
.@"union" => {
const union_obj = zcu.typeToUnion(cur_ty).?;
const enum_obj = ip.loadEnumType(union_obj.enum_tag_type);
const enum_tag_ty: Type = .fromInterned(union_obj.enum_tag_type);
const enum_obj = ip.loadEnumType((enum_tag_ty.unrestrictedType(zcu) orelse enum_tag_ty).toIntern());
const field_index = enum_obj.nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, cur_ty, union_obj, field_src, field_name);
const field_ty = union_obj.field_types.get(ip)[field_index];
@@ -26604,6 +26618,7 @@ fn unionFieldPtr(
const union_obj = zcu.typeToUnion(union_ty).?;
const tag_ty: Type = .fromInterned(union_obj.enum_tag_type);
const unrestricted_tag_ty = tag_ty.unrestrictedType(zcu) orelse tag_ty;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
@@ -26630,17 +26645,17 @@ fn unionFieldPtr(
break :ct;
}
// Store to the union to initialize the tag.
const field_tag = try pt.enumValueFieldIndex(tag_ty, field_index);
const field_tag = try pt.enumValueFieldIndex(unrestricted_tag_ty, field_index);
const payload_val = try field_ty.onePossibleValue(pt) orelse try pt.undefValue(field_ty);
const new_union_val = try pt.unionValue(union_ty, field_tag, payload_val);
try sema.storePtrVal(block, src, union_ptr_val, new_union_val, union_ty);
} else {
const union_val = try sema.pointerDeref(block, src, union_ptr_val, union_ptr_val.typeOf(zcu)) orelse break :ct;
if (union_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, src, null);
const active_index = tag_ty.enumTagFieldIndex(union_val.unionTag(zcu).?, zcu).?;
const active_index = unrestricted_tag_ty.enumTagFieldIndex(union_val.unionTag(zcu).?, zcu).?;
if (active_index != field_index) {
const msg = msg: {
const active_field_name = tag_ty.enumFieldName(active_index, zcu);
const active_field_name = unrestricted_tag_ty.enumFieldName(active_index, zcu);
const msg = try sema.errMsg(src, "access of union field '{f}' while field '{f}' is active", .{
field_name.fmt(ip),
active_field_name.fmt(ip),
@@ -26660,19 +26675,26 @@ fn unionFieldPtr(
// If the union has a tag, we must either set it or safety check it depending on `initializing`.
tag: {
if (union_ty.containerLayout(zcu) != .auto) break :tag;
if (tag_ty.classify(zcu) == .one_possible_value) break :tag;
if (unrestricted_tag_ty.classify(zcu) == .one_possible_value) break :tag;
// There is a hypothetical non-trivial tag. We must set it even if not there at runtime, but
// only emit a safety check if it's available at runtime (i.e. it's safety-tagged).
const want_tag = try pt.enumValueFieldIndex(tag_ty, field_index);
const want_tag = try pt.enumValueFieldIndex(unrestricted_tag_ty, field_index);
if (initializing) {
const set_tag_inst = try block.addBinOp(.set_union_tag, union_ptr, .fromValue(want_tag));
const set_tag_inst = try block.addBinOp(.set_union_tag, union_ptr, if (unrestricted_tag_ty.toIntern() != tag_ty.toIntern())
.fromIntern(try pt.intern(.{ .restricted_value = .{ .ty = tag_ty.toIntern(), .unrestricted_value = want_tag.toIntern() } }))
else
.fromValue(want_tag));
try sema.checkComptimeKnownStore(block, set_tag_inst, .unneeded); // `unneeded` since this isn't a "proper" store
} else if (block.wantSafety() and union_obj.has_runtime_tag) {
// The tag exists at runtime (actual or safety tag), so emit a safety check.
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
const active_tag = try block.addTyOp(.get_union_tag, tag_ty, union_val);
try sema.addSafetyCheckInactiveUnionField(block, src, active_tag, .fromValue(want_tag));
const unrestricted_active_tag = if (unrestricted_tag_ty.toIntern() != tag_ty.toIntern())
try sema.unwrapRestricted(block, unrestricted_tag_ty, active_tag, src)
else
active_tag;
try sema.addSafetyCheckInactiveUnionField(block, src, unrestricted_active_tag, .fromValue(want_tag));
}
}
if (field_ty.classify(zcu) == .no_possible_value) {
@@ -33539,7 +33561,8 @@ fn unionFieldIndex(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const union_obj = zcu.typeToUnion(union_ty).?;
const enum_obj = ip.loadEnumType(union_obj.enum_tag_type);
const enum_tag_ty: Type = .fromInterned(union_obj.enum_tag_type);
const enum_obj = ip.loadEnumType((enum_tag_ty.unrestrictedType(zcu) orelse enum_tag_ty).toIntern());
const field_index = enum_obj.nameIndex(ip, field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_ty, union_obj, field_src, field_name);
return @intCast(field_index);
+5 -3
@@ -694,8 +694,9 @@ pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void {
break :tag_ty try sema.analyzeAsType(&block, tag_type_src, .union_enum_tag_type, type_ref);
},
};
const unrestricted_tag_ty = tag_ty.unrestrictedType(zcu) orelse tag_ty;
// Because the type is explicitly specified, we need to validate it.
if (tag_ty.zigTypeTag(zcu) != .@"enum") return sema.fail(
if (unrestricted_tag_ty.zigTypeTag(zcu) != .@"enum") return sema.fail(
&block,
block.src(.container_arg),
"expected enum tag type, found '{f}'",
@@ -738,9 +739,10 @@ pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void {
},
},
};
const unrestricted_enum_tag_ty = enum_tag_ty.unrestrictedType(zcu) orelse enum_tag_ty;
try sema.ensureLayoutResolved(enum_tag_ty, block.src(.container_arg), .backing_enum);
const enum_obj = ip.loadEnumType(enum_tag_ty.toIntern());
try sema.ensureLayoutResolved(unrestricted_enum_tag_ty, block.src(.container_arg), .backing_enum);
const enum_obj = ip.loadEnumType(unrestricted_enum_tag_ty.toIntern());
if (union_obj.is_reified) {
// We have field names in `union_obj.reified_field_names`, but we haven't
+1 -1
@@ -3241,7 +3241,7 @@ pub fn validateExtern(ty: Type, position: ExternPosition, zcu: *const Zcu) bool
.pointer => {
if (ty.isSlice(zcu)) return false;
const child_ty = ty.childType(zcu);
if (child_ty.zigTypeTag(zcu) == .@"fn") {
if (zcu.intern_pool.isFunctionType(child_ty.toIntern())) {
return ty.isConstPtr(zcu) and validateExternCallconv(child_ty.fnCallingConvention(zcu));
}
return true;
+8 -3
@@ -4165,9 +4165,14 @@ pub const UnionLayout = struct {
pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &zcu.intern_pool;
if (enum_tag.toIntern() == .none) return null;
const enum_tag_key = ip.indexToKey(enum_tag.toIntern()).enum_tag;
assert(enum_tag_key.ty == loaded_union.enum_tag_type);
const loaded_enum = ip.loadEnumType(loaded_union.enum_tag_type);
const enum_tag_key = switch (ip.indexToKey(enum_tag.toIntern())) {
else => unreachable,
.enum_tag => |enum_tag_key| enum_tag_key,
.restricted_value => |restricted_value| ip.indexToKey(restricted_value.unrestricted_value).enum_tag,
};
const enum_tag_ty: Type = .fromInterned(loaded_union.enum_tag_type);
assert(enum_tag_key.ty == (enum_tag_ty.unrestrictedType(zcu) orelse enum_tag_ty).toIntern());
const loaded_enum = ip.loadEnumType(enum_tag_key.ty);
return loaded_enum.tagValueIndex(ip, enum_tag_key.int);
}
+114 -31
@@ -626,8 +626,8 @@ pub const Object = struct {
try builder.metadataString(compile_unit_dir),
);
const debug_enums_fwd_ref = try builder.debugForwardReference();
const debug_globals_fwd_ref = try builder.debugForwardReference();
const debug_enums_fwd_ref = try builder.metadataForwardReference();
const debug_globals_fwd_ref = try builder.metadataForwardReference();
const debug_compile_unit = try builder.debugCompileUnit(
debug_file,
@@ -701,9 +701,12 @@ pub const Object = struct {
const RestrictedDecls = struct {
len: Builder.Variable.Index,
array: Builder.Variable.Index,
enum_seen: []const Builder.Variable.Index,
values: std.array_hash_map.Auto(InternPool.Index, Builder.Constant),
metadata: Builder.Metadata.Optional,
fn deinit(rd: *RestrictedDecls, gpa: Allocator) void {
gpa.free(rd.enum_seen);
rd.values.deinit(gpa);
rd.* = undefined;
}
@@ -716,7 +719,8 @@ pub const Object = struct {
const zcu = o.zcu;
const target = zcu.getTarget();
const ip = &zcu.intern_pool;
const unrestricted_ty = restricted_ty.unrestrictedType(zcu).?;
const restricted_type_key = ip.indexToKey(restricted_ty.toIntern()).restricted_type;
const unrestricted_type = restricted_type_key.unrestricted_type;
const ty_name = ip.loadRestrictedType(restricted_ty.toIntern()).name.toSlice(ip);
gop.value_ptr.* = .{
@@ -730,7 +734,28 @@ pub const Object = struct {
.void,
.default,
),
.enum_seen = if (ip.isEnumType(unrestricted_type)) enum_seen: {
const owner_mod = zcu.fileByIndex(restricted_type_key.zir_index.resolveFile(ip)).mod.?;
if (owner_mod.optimize_mode != .ReleaseSmall) break :enum_seen &.{};
const field_names = ip.loadEnumType(unrestricted_type).field_names;
const enum_seen = try o.gpa.alloc(Builder.Variable.Index, field_names.len);
errdefer o.gpa.free(enum_seen);
for (enum_seen, field_names.get(ip)) |*global, field_name| {
global.* = try o.builder.addVariable(
try o.builder.strtabStringFmt("{s}.{f}", .{ ty_name, field_name.fmt(ip) }),
.i1,
.default,
);
global.setLinkage(.private, &o.builder);
global.setMutability(.global, &o.builder);
global.setAlignment(InternPool.Alignment.@"1".toLlvm(), &o.builder);
global.setUnnamedAddr(.unnamed_addr, &o.builder);
try global.setInitializer(.false, &o.builder);
}
break :enum_seen enum_seen;
} else &.{},
.values = .empty,
.metadata = .none,
};
gop.value_ptr.len.setLinkage(.private, &o.builder);
gop.value_ptr.len.setMutability(.constant, &o.builder);
@@ -738,23 +763,80 @@ pub const Object = struct {
gop.value_ptr.len.setUnnamedAddr(.unnamed_addr, &o.builder);
gop.value_ptr.array.setLinkage(.private, &o.builder);
gop.value_ptr.array.setMutability(.constant, &o.builder);
gop.value_ptr.array.setAlignment(unrestricted_ty.abiAlignment(zcu).toLlvm(), &o.builder);
// Setting unnamed_addr here would reduce safety, and the module emitting the safety checks may not be the same module
// that defined the restricted type. In any case, llvm will add unnamed_addr itself if no safety checks end up being emitted.
gop.value_ptr.array.setUnnamedAddr(.default, &o.builder);
gop.value_ptr.array.setAlignment(Type.fromInterned(unrestricted_type).abiAlignment(zcu).toLlvm(), &o.builder);
gop.value_ptr.array.setUnnamedAddr(.unnamed_addr, &o.builder);
return gop.value_ptr;
}
fn genRestrictedDecls(o: *Object) Allocator.Error!void {
for (o.restricted_map.values()) |restricted_decls| {
const zcu = o.zcu;
const ip = &zcu.intern_pool;
for (o.restricted_map.keys(), o.restricted_map.values()) |restricted_ty, restricted_decls| {
const len = restricted_decls.values.count();
try restricted_decls.len.setInitializer(try o.builder.intConst(.i32, len), &o.builder);
try restricted_decls.array.setInitializer(switch (len) {
0 => try o.builder.zeroInitConst(.i8), // ensure unique address
0 => try o.builder.structConst(try o.builder.structType(.normal, &.{}), &.{}),
else => try o.builder.arrayConst(
try o.builder.arrayType(len, restricted_decls.values.values()[0].typeOf(&o.builder)),
restricted_decls.values.values(),
),
}, &o.builder);
if (restricted_decls.metadata.unwrap()) |metadata| {
const gpa = zcu.gpa;
const unrestricted_ty = ip.indexToKey(restricted_ty).restricted_type.unrestricted_type;
if (ip.isPointerType(unrestricted_ty)) {
assert(ip.isFunctionType(ip.indexToKey(unrestricted_ty).ptr_type.child));
const callees = try gpa.alloc(Builder.Metadata, len);
defer gpa.free(callees);
for (callees, restricted_decls.values.values()) |*callee, value|
callee.* = try o.builder.metadataConstant(value);
o.builder.resolveMetadataForwardReference(metadata, try o.builder.metadataTuple(callees));
} else {
assert(Type.fromInterned(unrestricted_ty).isAbiInt(zcu));
const ints = try gpa.alloc(std.math.big.int.Const, len);
defer gpa.free(ints);
var range: std.ArrayList(Builder.Metadata) = .empty;
defer range.deinit(gpa);
o.builder.resolveMetadataForwardReference(metadata, range: {
if (len == 0) break :range .empty_tuple;
const values = restricted_decls.values.values();
for (ints, values) |*int, value| int.* = value.toInt(&o.builder) orelse
break :range .empty_tuple;
std.mem.sortUnstable(std.math.big.int.Const, ints, {}, struct {
fn lessThan(_: void, lhs: std.math.big.int.Const, rhs: std.math.big.int.Const) bool {
return lhs.order(rhs).compare(.lt);
}
}.lessThan);
var int_ty = values[0].typeOf(&o.builder);
var start = ints[0];
var end: std.math.big.int.Mutable = .{
.limbs = try gpa.alloc(
std.math.big.Limb,
std.math.big.int.calcNonZeroTwosCompLimbCount(int_ty.scalarBits(&o.builder)),
),
.len = undefined,
.positive = undefined,
};
defer gpa.free(end.limbs);
end.copy(start);
for (ints[1..]) |int| {
end.addScalar(end.toConst(), 1);
if (end.toConst().eql(int)) continue;
try range.appendSlice(gpa, &.{
try o.builder.metadataConstant(try o.builder.bigIntConst(int_ty, start)),
try o.builder.metadataConstant(try o.builder.bigIntConst(int_ty, end.toConst())),
});
start = int;
end.copy(int);
}
end.addScalar(end.toConst(), 1);
try range.appendSlice(gpa, &.{
try o.builder.metadataConstant(try o.builder.bigIntConst(int_ty, start)),
try o.builder.metadataConstant(try o.builder.bigIntConst(int_ty, end.toConst())),
});
break :range try o.builder.metadataTuple(range.items);
});
}
}
}
}
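
`genRestrictedDecls` sorts every distinct integer the restricted type was observed to hold and merges runs of adjacent values into pairs, the half-open `[lo, hi)` shape that LLVM `!range` metadata expects. The same loop with plain `u64`s instead of big-int limbs (a sketch, assuming distinct values that fit a machine word):

const std = @import("std");

// Coalesce sorted distinct values into half-open [start, end) pairs:
// extend the current run while the next value equals `end`, otherwise
// flush the pair and start a new run.
fn coalesce(gpa: std.mem.Allocator, values: []u64) !std.ArrayList(u64) {
    std.mem.sortUnstable(u64, values, {}, std.sort.asc(u64));
    var pairs: std.ArrayList(u64) = .empty;
    errdefer pairs.deinit(gpa);
    var start = values[0];
    var end = values[0] + 1;
    for (values[1..]) |value| {
        if (value == end) {
            end += 1;
            continue;
        }
        try pairs.appendSlice(gpa, &.{ start, end });
        start = value;
        end = value + 1;
    }
    try pairs.appendSlice(gpa, &.{ start, end });
    return pairs;
}

test "adjacent values merge into one range" {
    const gpa = std.testing.allocator;
    var values = [_]u64{ 3, 1, 2, 7 };
    var pairs = try coalesce(gpa, &values);
    defer pairs.deinit(gpa);
    try std.testing.expectEqualSlices(u64, &.{ 1, 4, 7, 8 }, pairs.items);
}
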
@@ -861,17 +943,17 @@ pub const Object = struct {
if (!o.builder.strip) {
if (o.debug_anyerror_fwd_ref.unwrap()) |fwd_ref| {
const debug_anyerror_type = try o.lowerDebugAnyerrorType();
o.builder.resolveDebugForwardReference(fwd_ref, debug_anyerror_type);
o.builder.resolveMetadataForwardReference(fwd_ref, debug_anyerror_type);
}
try o.flushTypePool(pt);
o.builder.resolveDebugForwardReference(
o.builder.resolveMetadataForwardReference(
o.debug_enums_fwd_ref.unwrap().?,
try o.builder.metadataTuple(o.debug_enums.items),
);
o.builder.resolveDebugForwardReference(
o.builder.resolveMetadataForwardReference(
o.debug_globals_fwd_ref.unwrap().?,
try o.builder.metadataTuple(o.debug_globals.items),
);
@@ -1936,7 +2018,7 @@ pub const Object = struct {
if (!o.builder.strip) {
assert(@intFromEnum(index) == o.debug_types.items.len);
try o.debug_types.ensureUnusedCapacity(gpa, 1);
const fwd_ref = try o.builder.debugForwardReference();
const fwd_ref = try o.builder.metadataForwardReference();
o.debug_types.appendAssumeCapacity(fwd_ref);
if (val == .anyerror_type) {
assert(o.debug_anyerror_fwd_ref.is_none);
@@ -1968,7 +2050,7 @@ pub const Object = struct {
.@"fn" => try o.builder.debugSubroutineType(null),
else => try o.builder.debugSignedType(name_str, 0),
};
o.builder.resolveDebugForwardReference(fwd_ref, debug_incomplete_type);
o.builder.resolveMetadataForwardReference(fwd_ref, debug_incomplete_type);
}
}
/// Should only be called by the `link.ConstPool` implementation.
@@ -1992,7 +2074,7 @@ pub const Object = struct {
assert(o.debug_anyerror_fwd_ref == fwd_ref.toOptional());
} else {
const debug_type = try o.lowerDebugType(pt, ty, fwd_ref);
o.builder.resolveDebugForwardReference(fwd_ref, debug_type);
o.builder.resolveMetadataForwardReference(fwd_ref, debug_type);
}
}
}
@@ -2527,7 +2609,7 @@ pub const Object = struct {
const payload_fwd_ref = if (layout.tag_size == 0)
ty_fwd_ref
else
try o.builder.debugForwardReference();
try o.builder.metadataForwardReference();
for (0..union_type.field_types.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
@@ -2566,7 +2648,7 @@ pub const Object = struct {
return debug_payload_type;
}
o.builder.resolveDebugForwardReference(payload_fwd_ref, debug_payload_type);
o.builder.resolveMetadataForwardReference(payload_fwd_ref, debug_payload_type);
const tag_offset: u64, const payload_offset: u64 = offsets: {
if (layout.tag_align.compare(.gte, layout.payload_align)) {
@@ -3988,6 +4070,11 @@ pub const Object = struct {
const restricted_decls = try o.getRestrictedDecls(ty);
const gop = try restricted_decls.values.getOrPut(o.gpa, arg_val);
if (!gop.found_existing) gop.value_ptr.* = try o.lowerValue(restricted_value.unrestricted_value);
if (restricted_decls.enum_seen.len > 0) enum_seen: {
const unrestricted_val: Value = .fromInterned(restricted_value.unrestricted_value);
const tag_index = unrestricted_val.typeOf(zcu).enumTagFieldIndex(unrestricted_val, zcu) orelse break :enum_seen;
try restricted_decls.enum_seen[tag_index].setInitializer(.true, &o.builder);
}
return o.builder.intConst(.i32, gop.index);
},
.memoized_call => unreachable,
@@ -4002,37 +4089,29 @@ pub const Object = struct {
const zcu = o.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.nav => |nav| {
const base_ptr = try o.lowerNavRef(nav);
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
try o.builder.intConst(.i64, offset),
});
},
const base_ptr = base_ptr: switch (ptr.base_addr) {
.nav => |nav| try o.lowerNavRef(nav),
.uav => |uav| {
const orig_ptr_ty: Type = .fromInterned(uav.orig_ty);
const base_ptr = try o.lowerUavRef(
break :base_ptr try o.lowerUavRef(
uav.val,
orig_ptr_ty.ptrAlignment(zcu),
orig_ptr_ty.ptrAddressSpace(zcu),
);
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
try o.builder.intConst(.i64, offset),
});
},
.int => try o.builder.castConst(
.int => return o.builder.castConst(
.inttoptr,
try o.builder.intConst(try o.lowerType(.usize), offset),
try o.lowerType(.fromInterned(ptr.ty)),
),
.eu_payload => |eu_ptr| try o.lowerPtr(
.eu_payload => |eu_ptr| return o.lowerPtr(
eu_ptr,
offset + codegen.errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
zcu,
),
),
.opt_payload => |opt_ptr| try o.lowerPtr(opt_ptr, offset),
.opt_payload => |opt_ptr| return o.lowerPtr(opt_ptr, offset),
.field => |field| {
const agg_ty = Value.fromInterned(field.base).typeOf(zcu).childType(zcu);
const field_off: u64 = switch (agg_ty.zigTypeTag(zcu)) {
@@ -4061,6 +4140,10 @@ pub const Object = struct {
.comptime_field => unreachable,
.comptime_alloc => unreachable,
};
if (offset == 0) return base_ptr;
return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{
try o.builder.intConst(.i64, offset),
});
}
pub fn lowerPtrToVoid(
+68 -21
@@ -826,7 +826,7 @@ fn airCall(self: *FuncGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
};
}
const call = try self.wip.call(
const call = try self.wip.callMetadata(
switch (modifier) {
.auto, .never_inline => .normal,
.never_tail => .notail,
@@ -838,6 +838,17 @@ fn airCall(self: *FuncGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
try o.lowerType(zig_fn_ty),
llvm_fn,
llvm_args.items,
.{
.callees = if (air_call.callee.toIndex()) |callee_inst| switch (self.air.instructions.items(.tag)[@intFromEnum(callee_inst)]) {
else => .none,
.unwrap_restricted, .unwrap_restricted_safe => callees: {
const restricted_ty = self.typeOf(self.air.instructions.items(.data)[@intFromEnum(callee_inst)].ty_op.operand);
const restricted_decls = try o.getRestrictedDecls(restricted_ty);
if (restricted_decls.metadata.is_none) restricted_decls.metadata = .wrap(try o.builder.metadataForwardReference());
break :callees restricted_decls.metadata;
},
} else .none,
},
"",
);
@@ -1667,6 +1678,7 @@ fn lowerTry(
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index, is_dispatch_loop: bool) TodoError!void {
const o = self.object;
const zcu = o.zcu;
const ip = &zcu.intern_pool;
const switch_br = self.air.unwrapSwitch(inst);
@@ -1694,6 +1706,7 @@ fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Tod
// This asm is really, really not what we want. As such, we will construct the jump table manually where
// appropriate (when the values are dense and relatively few), and use it when lowering dispatches.
const cond_ty = self.typeOf(switch_br.operand);
const jmp_table: ?SwitchDispatchInfo.JmpTable = jmp_table: {
if (!is_dispatch_loop) break :jmp_table null;
@@ -1706,7 +1719,6 @@ fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Tod
// about acceptable - it won't fill L1d cache on most CPUs.
const max_table_len = 1024;
const cond_ty = self.typeOf(switch_br.operand);
switch (cond_ty.zigTypeTag(zcu)) {
.bool, .pointer => break :jmp_table null,
.@"enum", .int, .error_set, .@"struct", .@"union" => {},
@@ -1859,6 +1871,18 @@ fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Tod
assert(self.switch_dispatch_info.remove(inst));
};
const restricted_enum_seen = if (ip.isEnumType(cond_ty.toIntern())) restricted_enum_seen: {
const operand_inst = switch_br.operand.toIndex() orelse break :restricted_enum_seen &.{};
switch (self.air.instructions.items(.tag)[@intFromEnum(operand_inst)]) {
else => break :restricted_enum_seen &.{},
.unwrap_restricted, .unwrap_restricted_safe => {
const restricted_ty = self.typeOf(self.air.instructions.items(.data)[@intFromEnum(operand_inst)].ty_op.operand);
const restricted_decls = try o.getRestrictedDecls(restricted_ty);
break :restricted_enum_seen restricted_decls.enum_seen;
},
}
} else &.{};
// Generate the initial dispatch.
// If this is a simple `switch_br`, this is the only dispatch.
try self.lowerSwitchDispatch(inst, switch_br.operand, dispatch_info);
@@ -1869,6 +1893,16 @@ fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Tod
const case_block = case_blocks[case.idx];
self.wip.cursor = .{ .block = case_block };
if (switch_br.getHint(case.idx) == .cold) _ = try self.wip.callIntrinsicAssumeCold();
if (restricted_enum_seen.len > 0) restricted_enum_seen: {
var maybe_any_seen: ?Builder.Value = null;
for (case.items) |item| {
const tag_index = cond_ty.enumTagFieldIndex(.fromInterned(item.toInterned().?), zcu) orelse break :restricted_enum_seen;
const tag_seen = try self.wip.load(.normal, .i1, restricted_enum_seen[tag_index].toValue(&o.builder), InternPool.Alignment.@"1".toLlvm(), "");
maybe_any_seen = if (maybe_any_seen) |any_seen| try self.wip.bin(.@"or", any_seen, tag_seen, "") else tag_seen;
}
assert(case.ranges.len == 0); // not supported by Sema yet
_ = try self.wip.callIntrinsic(.normal, .none, .assume, &.{}, &.{maybe_any_seen.?}, "");
}
try self.genBodyDebugScope(null, case.body, .none);
}
self.wip.cursor = .{ .block = case_blocks[case_blocks.len - 1] };
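The per-case block above ORs the per-tag seen flags together and feeds the result to llvm.assume, telling LLVM that a case body is unreachable unless one of its tags was ever constructed. A standalone analog of the reduction, using a plain assert in place of the intrinsic (names hypothetical):

const std = @import("std");

fn caseReachable(seen: []const bool, case_tags: []const usize) bool {
    // OR together one "seen" flag per tag listed in the case, mirroring
    // the maybe_any_seen reduction above.
    var any: ?bool = null;
    for (case_tags) |tag| {
        const flag = seen[tag];
        any = if (any) |a| (a or flag) else flag;
    }
    return any.?;
}

pub fn main() void {
    const seen = [_]bool{ true, false, true };
    // Stands in for the llvm.assume: a case covering tags 1 and 2 is
    // reachable because tag 2 was constructed somewhere.
    std.debug.assert(caseReachable(&seen, &.{ 1, 2 }));
}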
@@ -2124,11 +2158,7 @@ fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder
const elem_align = slice_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu));
const access_kind: Builder.MemoryAccessKind = if (slice_info.flags.is_volatile) .@"volatile" else .normal;
self.maybeMarkAllowZeroAccess(slice_info);
if (isByRef(elem_ty, zcu)) {
return self.loadByRef(ptr, elem_ty, elem_align.toLlvm(), access_kind);
} else {
return self.loadTruncate(access_kind, elem_ty, ptr, elem_align.toLlvm());
}
return self.load(ptr, elem_ty, elem_align.toLlvm(), access_kind);
}
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder.Value {
@@ -2153,12 +2183,7 @@ fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder
const elem_ty = array_ty.childType(zcu);
if (isByRef(array_ty, zcu)) {
const elem_ptr = try self.ptraddScaled(array_llvm_val, rhs, elem_ty.abiSize(zcu));
if (isByRef(elem_ty, zcu)) {
const elem_align = elem_ty.abiAlignment(zcu).toLlvm();
return self.loadByRef(elem_ptr, elem_ty, elem_align, .normal);
} else {
return self.loadTruncate(.normal, elem_ty, elem_ptr, .default);
}
return self.load(elem_ptr, elem_ty, elem_ty.abiAlignment(zcu).toLlvm(), .normal);
}
// This branch can be reached for vectors, which are always by-value.
@@ -2277,11 +2302,7 @@ fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Build
else => struct_ptr_align.minStrict(.fromLog2Units(@ctz(offset))),
};
if (isByRef(field_ty, zcu)) {
return self.loadByRef(field_ptr, field_ty, field_ptr_align.toLlvm(), .normal);
} else {
return self.loadTruncate(.normal, field_ty, field_ptr, field_ptr_align.toLlvm());
}
return self.load(field_ptr, field_ty, field_ptr_align.toLlvm(), .normal);
}
fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder.Value {
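The three hunks above are the same simplification applied three times: the isByRef branch that was duplicated at every element/field load moves into a single load helper that picks the strategy itself. A toy standalone version of that decision, with a made-up size threshold standing in for isByRef:

const std = @import("std");

const Strategy = enum { by_ref, truncate };

// Stand-in for isByRef: large aggregates stay in memory and are loaded
// by reference; small values are loaded and truncated in registers.
fn strategyFor(comptime T: type) Strategy {
    return if (@sizeOf(T) > 16) .by_ref else .truncate;
}

pub fn main() void {
    std.debug.print("{t} {t}\n", .{ strategyFor([64]u8), strategyFor(u32) });
}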
@@ -3259,6 +3280,7 @@ fn airUnwrapRestricted(fg: *FuncGen, inst: Air.Inst.Index, safety: bool) Allocat
const target = zcu.getTarget();
const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const unrestricted_ty = ty_op.ty.toType();
const unrestricted_align = unrestricted_ty.abiAlignment(zcu);
const restricted_ty = fg.typeOf(ty_op.operand);
const operand = try fg.resolveInst(ty_op.operand);
const restricted_decls = try o.getRestrictedDecls(restricted_ty);
@@ -3281,7 +3303,13 @@ fn airUnwrapRestricted(fg: *FuncGen, inst: Air.Inst.Index, safety: bool) Allocat
fg.wip.cursor = .{ .block = valid_block };
}
const ptr = try fg.ptraddScaled(restricted_decls.array.toValue(&o.builder), operand, unrestricted_ty.abiSize(zcu));
return fg.load(ptr, unrestricted_ty, unrestricted_ty.abiAlignment(zcu).toLlvm(), .normal);
if (isByRef(unrestricted_ty, zcu)) return fg.loadByRef(ptr, unrestricted_ty, unrestricted_align.toLlvm(), .normal);
return fg.wip.loadMetadata(.normal, try o.lowerType(unrestricted_ty), ptr, unrestricted_align.toLlvm(), .{
.range = if (unrestricted_ty.isAbiInt(zcu)) range: {
if (restricted_decls.metadata.is_none) restricted_decls.metadata = .wrap(try o.builder.metadataForwardReference());
break :range restricted_decls.metadata;
} else .none,
}, "");
}
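The loadMetadata call above attaches range metadata so LLVM knows the loaded integer stays within the restricted set. At the source level, the closest analog is asserting a bound right after the load; a hedged standalone sketch (the bound and all names are made up):

const std = @import("std");

fn loadRestricted(table: []const u32, index: usize) u32 {
    const value = table[index];
    // What the range metadata communicates: the loaded value is known to
    // lie inside the restricted set's bounds, so later checks can fold.
    std.debug.assert(value < 100);
    return value;
}

pub fn main() void {
    const table = [_]u32{ 3, 17, 42 };
    std.debug.print("{d}\n", .{loadRestricted(&table, 1)});
}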
fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder.Value {
@@ -6042,8 +6070,27 @@ fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder.Va
}
if (layout.tag_size != 0) {
const loaded_enum = ip.loadEnumType(union_obj.enum_tag_type);
const llvm_tag_val = switch (loaded_enum.field_values.getOrNone(ip, extra.field_index)) {
const tag_ty: Type = .fromInterned(union_obj.enum_tag_type);
const llvm_tag_val = if (tag_ty.unrestrictedType(zcu)) |unrestricted_tag_ty| llvm_tag_val: {
const restricted_decls = try o.getRestrictedDecls(tag_ty);
const unrestricted_tag_val = try self.pt.enumValueFieldIndex(unrestricted_tag_ty, extra.field_index);
const tag_val = try self.pt.intern(.{ .restricted_value = .{
.ty = union_obj.enum_tag_type,
.unrestricted_value = unrestricted_tag_val.toIntern(),
} });
const gop = try restricted_decls.values.getOrPut(o.gpa, tag_val);
if (!gop.found_existing) gop.value_ptr.* = try o.lowerValue(unrestricted_tag_val.toIntern());
if (restricted_decls.enum_seen.len > 0) enum_seen: {
const tag_index = unrestricted_tag_ty.enumTagFieldIndex(unrestricted_tag_val, zcu) orelse break :enum_seen;
_ = try self.wip.store(
.normal,
.true,
restricted_decls.enum_seen[tag_index].toValue(&o.builder),
InternPool.Alignment.@"1".toLlvm(),
);
}
break :llvm_tag_val try o.builder.intConst(.i32, gop.index);
} else switch (ip.loadEnumType(union_obj.enum_tag_type).field_values.getOrNone(ip, extra.field_index)) {
.none => try o.builder.intConst(
try o.lowerType(.fromInterned(union_obj.enum_tag_type)),
extra.field_index, // auto-numbered
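One contrast worth noting against the constant path earlier, which flips the seen flag via setInitializer at lowering time: a runtime union init has to emit an actual store. A trivial standalone analog with a global flag array (hypothetical names):

const std = @import("std");

var enum_seen = [_]bool{ false, false, false };

fn initUnion(tag_index: usize) void {
    // Runtime counterpart of the wip.store(.normal, .true, ...) above:
    // flip the per-tag flag when the tag is constructed at runtime.
    enum_seen[tag_index] = true;
}

pub fn main() void {
    initUnion(1);
    std.debug.print("{any}\n", .{enum_seen});
}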
+2 -1
View File
@@ -172764,8 +172764,9 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
var res = try cg.tempAllocMem(union_ty);
const union_layout = union_ty.unionGetLayout(zcu);
if (union_layout.tag_size > 0) {
const tag_ty = union_ty.unionTagTypeRuntime(zcu).?;
var tag_temp = try cg.tempFromValue(try pt.enumValueFieldIndex(
union_ty.unionTagTypeRuntime(zcu).?,
tag_ty.unrestrictedType(zcu) orelse tag_ty,
union_init.field_index,
));
try res.write(&tag_temp, .{
+2 -1
View File
@@ -4046,7 +4046,8 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
const union_layout = Type.getUnionLayout(loaded_union, zcu);
try diw.writeUleb128(union_layout.abi_size);
try diw.writeUleb128(union_layout.abi_align.toByteUnits().?);
const loaded_tag = ip.loadEnumType(loaded_union.enum_tag_type);
const enum_tag_ty: Type = .fromInterned(loaded_union.enum_tag_type);
const loaded_tag = ip.loadEnumType((enum_tag_ty.unrestrictedType(zcu) orelse enum_tag_ty).toIntern());
if (loaded_union.has_runtime_tag) {
try wip_nav.abbrevCode(.tagged_union);
try wip_nav.infoSectionOffset(
+10
View File
@@ -51,6 +51,7 @@
#include <llvm/Target/CodeGenCWrappers.h>
#include <llvm/Transforms/IPO.h>
#include <llvm/Transforms/IPO/AlwaysInliner.h>
#include <llvm/Transforms/IPO/GlobalOpt.h>
#include <llvm/Transforms/Instrumentation/ThreadSanitizer.h>
#include <llvm/Transforms/Instrumentation/SanitizerCoverage.h>
#include <llvm/Transforms/Scalar.h>
@@ -348,6 +349,10 @@ ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machi
});
pass_builder.registerOptimizerLastEPCallback([&](ModulePassManager &module_pm, OptimizationLevel level, ThinOrFullLTOPhase lto_phase) {
// Restricted enums require an extra global optimization pass sometime after
// DropUnnecessaryAssumesPass to fully eliminate the helper global variables.
if (level.isOptimizingForSize()) module_pm.addPass(GlobalOptPass());
if (!early_san) {
// Code coverage instrumentation.
if (options->sancov) {
@@ -404,6 +409,11 @@ ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machi
}
}
if (false) {
module_pm.printPipeline(outs(), [](StringRef S) { return S; });
outs() << '\n';
}
// Optimization phase
module_pm.run(llvm_module, module_am);