Merge pull request 'llvm: some random fixes' (#31552) from llvm-fixes into master

Reviewed-on: https://codeberg.org/ziglang/zig/pulls/31552
Reviewed-by: Andrew Kelley <andrew@ziglang.org>
This commit is contained in:
Andrew Kelley
2026-03-18 01:02:55 +01:00
+116 -99
View File
@@ -1438,8 +1438,7 @@ pub const Object = struct {
const param = wip.arg(llvm_arg_i);
llvm_arg_i += 1;
const field_ptr = try wip.gepStruct(llvm_ty, arg_ptr, field_i, "");
const alignment =
Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
const alignment = Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8));
_ = try wip.store(.normal, param, field_ptr, alignment);
}
@@ -1625,13 +1624,7 @@ pub const Object = struct {
.pt = pt,
.err_msg = null,
};
ng.genDecl() catch |err| switch (err) {
error.CodegenFail => switch (pt.zcu.codegenFailMsg(nav_index, ng.err_msg.?)) {
error.CodegenFail => return,
error.OutOfMemory => |e| return e,
},
else => |e| return e,
};
try ng.genDecl();
try self.flushTypePool(pt);
}
@@ -1713,10 +1706,7 @@ pub const Object = struct {
const global_index = variable_index.ptrConst(&o.builder).global;
gop.value_ptr.* = global_index;
// This line invalidates `gop`.
const init_val = o.lowerValue(pt, exported_value) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.CodegenFail => return error.AnalysisFail,
};
const init_val = try o.lowerValue(pt, exported_value);
try variable_index.setInitializer(init_val, &o.builder);
break :i global_index;
};
@@ -1834,6 +1824,9 @@ pub const Object = struct {
/// Notifies the LLVM backend that a container type has been (re)resolved.
/// Forwards the update to the type pool, then — if an
/// `__zig_is_named_enum_value_*` helper function was previously emitted for
/// this type — regenerates that helper's body so it matches the possibly
/// changed enum definition.
pub fn updateContainerType(o: *Object, pt: Zcu.PerThread, ty: InternPool.Index, success: bool) Allocator.Error!void {
try o.type_pool.updateContainerType(pt, .{ .llvm = o }, ty, success);
// A helper function may have been emitted for this enum before the type
// changed; re-emit its body now so it reflects the current field set.
if (o.named_enum_map.get(ty)) |function_index| {
try o.updateIsNamedEnumValueFunction(pt, .fromInterned(ty), function_index);
}
}
/// Should only be called by the `link.ConstPool` implementation.
@@ -2907,7 +2900,7 @@ pub const Object = struct {
uav: InternPool.Index,
llvm_addr_space: Builder.AddrSpace,
alignment: InternPool.Alignment,
) Error!Builder.Variable.Index {
) Allocator.Error!Builder.Variable.Index {
assert(alignment != .none);
// TODO: Add address space to the anon_decl_map
const gop = try o.uav_map.getOrPut(o.gpa, uav);
@@ -3506,7 +3499,7 @@ pub const Object = struct {
);
}
fn lowerValue(o: *Object, pt: Zcu.PerThread, arg_val: InternPool.Index) Error!Builder.Constant {
fn lowerValue(o: *Object, pt: Zcu.PerThread, arg_val: InternPool.Index) Allocator.Error!Builder.Constant {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
@@ -4019,7 +4012,7 @@ pub const Object = struct {
pt: Zcu.PerThread,
ptr_val: InternPool.Index,
prev_offset: u64,
) Error!Builder.Constant {
) Allocator.Error!Builder.Constant {
const zcu = pt.zcu;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
@@ -4086,7 +4079,7 @@ pub const Object = struct {
o: *Object,
pt: Zcu.PerThread,
uav: InternPool.Key.Ptr.BaseAddr.Uav,
) Error!Builder.Constant {
) Allocator.Error!Builder.Constant {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const uav_val = uav.val;
@@ -4363,6 +4356,58 @@ pub const Object = struct {
const index = try o.type_pool.get(pt, .{ .llvm = o }, ty.toIntern());
return o.lazy_abi_aligns.items[@intFromEnum(index)];
}
/// (Re)generates the body of the `__zig_is_named_enum_value_*` helper for
/// `enum_ty`. The emitted LLVM function takes the enum's integer tag type
/// and returns `i1`: true when the tag value matches one of the enum's
/// named fields, false otherwise. `function_index` must already exist in
/// the builder (it is created with a dummy type by the caller).
fn updateIsNamedEnumValueFunction(
o: *Object,
pt: Zcu.PerThread,
enum_ty: Type,
function_index: Builder.Function.Index,
) Allocator.Error!void {
const zcu = pt.zcu;
const builder = &o.builder;
const loaded_enum = zcu.intern_pool.loadEnumType(enum_ty.toIntern());
// The function was registered with a placeholder type; overwrite it with
// the real signature `fn(int_tag_type) i1` now that the enum is loaded.
function_index.ptrConst(builder).global.ptr(builder).type = try builder.fnType(
.i1,
&.{try o.lowerType(pt, .fromInterned(loaded_enum.int_tag_type))},
.normal,
);
var attributes: Builder.FunctionAttributes.Wip = .{};
defer attributes.deinit(builder);
try o.addCommonFnAttributes(&attributes, zcu.root_mod, zcu.root_mod.omit_frame_pointer);
// Internal-only helper: not exported, so fastcc is safe to use.
function_index.setLinkage(.internal, builder);
function_index.setCallConv(.fastcc, builder);
function_index.setAttributes(try attributes.finish(builder), builder);
// Build the body: a switch on the tag integer that jumps to "Named" for
// every declared field value and falls through to "Unnamed" otherwise.
var wip: Builder.WipFunction = try .init(builder, .{
.function = function_index,
.strip = true,
});
defer wip.deinit();
wip.cursor = .{ .block = try wip.block(0, "Entry") };
const named_block = try wip.block(@intCast(loaded_enum.field_names.len), "Named");
const unnamed_block = try wip.block(1, "Unnamed");
const tag_int_value = wip.arg(0);
var wip_switch = try wip.@"switch"(tag_int_value, unnamed_block, @intCast(loaded_enum.field_names.len), .none);
defer wip_switch.finish(&wip);
// One switch case per named field, keyed by that field's tag value.
for (0..loaded_enum.field_names.len) |field_index| {
const this_tag_int_value = try o.lowerValue(
pt,
(try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
);
try wip_switch.addCase(this_tag_int_value, named_block, &wip);
}
// Named tag -> true; any other value -> false.
wip.cursor = .{ .block = named_block };
_ = try wip.ret(.true);
wip.cursor = .{ .block = unnamed_block };
_ = try wip.ret(.false);
try wip.finish();
}
};
pub const NavGen = struct {
@@ -6105,7 +6150,7 @@ pub const FuncGen = struct {
const body = unwrapped_try.else_body;
const err_union_ty = self.typeOf(unwrapped_try.error_union);
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union, body, err_union_ty, false, false, is_unused, err_cold);
return lowerTry(self, err_union, body, err_union_ty, false, .none, false, is_unused, err_cold);
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index, err_cold: bool) !Builder.Value {
@@ -6113,12 +6158,13 @@ pub const FuncGen = struct {
const unwrapped_try = self.air.unwrapTryPtr(inst);
const err_union_ptr = try self.resolveInst(unwrapped_try.error_union_ptr);
const body = unwrapped_try.else_body;
const err_union_ty = self.typeOf(unwrapped_try.error_union_ptr).childType(zcu);
const err_union_ptr_ty = self.typeOf(unwrapped_try.error_union_ptr);
const err_union_ty = err_union_ptr_ty.childType(zcu);
const is_unused = self.liveness.isUnused(inst);
self.maybeMarkAllowZeroAccess(self.typeOf(unwrapped_try.error_union_ptr).ptrInfo(zcu));
return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused, err_cold);
return lowerTry(self, err_union_ptr, body, err_union_ty, true, err_union_ptr_ty.ptrAlignment(zcu), true, is_unused, err_cold);
}
fn lowerTry(
@@ -6127,6 +6173,7 @@ pub const FuncGen = struct {
body: []const Air.Inst.Index,
err_union_ty: Type,
operand_is_ptr: bool,
operand_ptr_align: InternPool.Alignment,
can_elide_load: bool,
is_unused: bool,
err_cold: bool,
@@ -6139,15 +6186,19 @@ pub const FuncGen = struct {
const err_union_llvm_ty = try o.lowerType(pt, err_union_ty);
const error_type = try o.errorIntType(pt);
const err_set_align: InternPool.Alignment, const payload_align: InternPool.Alignment = if (operand_is_ptr) .{
operand_ptr_align.minStrict(Type.anyerror.abiAlignment(zcu)),
operand_ptr_align.minStrict(payload_ty.abiAlignment(zcu)),
} else .{ .none, .none };
if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
const loaded = loaded: {
const access_kind: Builder.MemoryAccessKind =
if (err_union_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
if (!payload_has_bits) {
// TODO add alignment to this load
break :loaded if (operand_is_ptr)
try fg.wip.load(access_kind, error_type, err_union, .default, "")
try fg.wip.load(access_kind, error_type, err_union, err_set_align.toLlvm(), "")
else
err_union;
}
@@ -6155,12 +6206,11 @@ pub const FuncGen = struct {
if (operand_is_ptr or isByRef(err_union_ty, zcu)) {
const err_field_ptr =
try fg.wip.gepStruct(err_union_llvm_ty, err_union, err_field_index, "");
// TODO add alignment to this load
break :loaded try fg.wip.load(
if (operand_is_ptr) access_kind else .normal,
error_type,
err_field_ptr,
.default,
err_set_align.toLlvm(),
"",
);
}
@@ -6186,15 +6236,14 @@ pub const FuncGen = struct {
return fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
} else if (isByRef(err_union_ty, zcu)) {
const payload_ptr = try fg.wip.gepStruct(err_union_llvm_ty, err_union, offset, "");
const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
if (isByRef(payload_ty, zcu)) {
if (can_elide_load)
return payload_ptr;
return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal);
return fg.loadByRef(payload_ptr, payload_ty, payload_align.toLlvm(), .normal);
}
const load_ty = err_union_llvm_ty.structFields(&o.builder)[offset];
return fg.wip.load(.normal, load_ty, payload_ptr, payload_alignment, "");
return fg.wip.load(.normal, load_ty, payload_ptr, payload_align.toLlvm(), "");
}
return fg.wip.extractValue(err_union, &.{offset}, "");
}
@@ -6667,20 +6716,20 @@ pub const FuncGen = struct {
const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
const elem_ty = slice_ty.childType(zcu);
const slice_info = slice_ty.ptrInfo(zcu);
assert(slice_info.flags.size == .slice);
const elem_ty: Type = .fromInterned(slice_info.child);
const llvm_elem_ty = try o.lowerType(pt, elem_ty);
const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
const elem_align = slice_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu));
const access_kind: Builder.MemoryAccessKind = if (slice_info.flags.is_volatile) .@"volatile" else .normal;
self.maybeMarkAllowZeroAccess(slice_info);
if (isByRef(elem_ty, zcu)) {
self.maybeMarkAllowZeroAccess(slice_ty.ptrInfo(zcu));
const slice_align = (slice_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu))).toLlvm();
return self.loadByRef(ptr, elem_ty, slice_align, if (slice_ty.isVolatilePtr(zcu)) .@"volatile" else .normal);
return self.loadByRef(ptr, elem_ty, elem_align.toLlvm(), access_kind);
} else {
return self.loadTruncate(access_kind, elem_ty, ptr, elem_align.toLlvm());
}
self.maybeMarkAllowZeroAccess(slice_ty.ptrInfo(zcu));
return self.load(ptr, slice_ty);
}
fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -7461,7 +7510,7 @@ pub const FuncGen = struct {
if (optional_ty.optionalReprIsPayload(zcu)) {
const loaded = if (operand_is_ptr)
try self.wip.load(access_kind, optional_llvm_ty, operand, .default, "")
try self.wip.load(access_kind, optional_llvm_ty, operand, operand_ty.ptrAlignment(zcu).toLlvm(), "")
else
operand;
if (payload_ty.isSlice(zcu)) {
@@ -7479,7 +7528,7 @@ pub const FuncGen = struct {
if (!payload_ty.hasRuntimeBits(zcu)) {
const loaded = if (operand_is_ptr)
try self.wip.load(access_kind, optional_llvm_ty, operand, .default, "")
try self.wip.load(access_kind, optional_llvm_ty, operand, operand_ty.ptrAlignment(zcu).toLlvm(), "")
else
operand;
return self.wip.icmp(cond, loaded, try o.builder.intValue(.i8, 0), "");
@@ -7522,7 +7571,7 @@ pub const FuncGen = struct {
if (!payload_ty.hasRuntimeBits(zcu)) {
const loaded = if (operand_is_ptr)
try self.wip.load(access_kind, try o.lowerType(pt, err_union_ty), operand, .default, "")
try self.wip.load(access_kind, try o.lowerType(pt, err_union_ty), operand, operand_ty.ptrAlignment(zcu).toLlvm(), "")
else
operand;
return self.wip.icmp(cond, loaded, zero, "");
@@ -7532,9 +7581,13 @@ pub const FuncGen = struct {
const loaded = if (operand_is_ptr or isByRef(err_union_ty, zcu)) loaded: {
const err_union_llvm_ty = try o.lowerType(pt, err_union_ty);
const err_alignment = if (operand_is_ptr)
operand_ty.ptrAlignment(zcu).minStrict(Type.anyerror.abiAlignment(zcu))
else
.none;
const err_field_ptr =
try self.wip.gepStruct(err_union_llvm_ty, operand, err_field_index, "");
break :loaded try self.wip.load(access_kind, error_type, err_field_ptr, .default, "");
break :loaded try self.wip.load(access_kind, error_type, err_field_ptr, err_alignment.toLlvm(), "");
} else try self.wip.extractValue(operand, &.{err_field_index}, "");
return self.wip.icmp(cond, loaded, zero, "");
}
@@ -7579,6 +7632,7 @@ pub const FuncGen = struct {
self.maybeMarkAllowZeroAccess(optional_ptr_ty.ptrInfo(zcu));
// We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
// Default alignment store because align of the non null bit is 1 anyway.
_ = try self.wip.store(access_kind, non_null_bit, operand, .default);
return operand;
}
@@ -7594,7 +7648,7 @@ pub const FuncGen = struct {
self.maybeMarkAllowZeroAccess(optional_ptr_ty.ptrInfo(zcu));
// TODO set alignment on this store
// Default alignment store because align of the non null bit is 1 anyway.
_ = try self.wip.store(access_kind, non_null_bit, non_null_ptr, .default);
// Then return the payload pointer (only if it's used).
@@ -7682,7 +7736,7 @@ pub const FuncGen = struct {
self.maybeMarkAllowZeroAccess(operand_ty.ptrInfo(zcu));
return self.wip.load(access_kind, error_type, operand, .default, "");
return self.wip.load(access_kind, error_type, operand, operand_ty.ptrAlignment(zcu).toLlvm(), "");
}
const offset = try errUnionErrorOffset(payload_ty, pt);
@@ -7706,6 +7760,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const err_union_ptr_ty = self.typeOf(ty_op.operand);
const err_union_ty = err_union_ptr_ty.childType(zcu);
const err_union_ptr_align = err_union_ptr_ty.ptrAlignment(zcu);
const payload_ty = err_union_ty.errorUnionPayload(zcu);
const non_error_val = try o.builder.intValue(try o.errorIntType(pt), 0);
@@ -7715,8 +7770,7 @@ pub const FuncGen = struct {
if (!payload_ty.hasRuntimeBits(zcu)) {
self.maybeMarkAllowZeroAccess(err_union_ptr_ty.ptrInfo(zcu));
_ = try self.wip.store(access_kind, non_error_val, operand, .default);
_ = try self.wip.store(access_kind, non_error_val, operand, err_union_ptr_align.toLlvm());
return operand;
}
const err_union_llvm_ty = try o.lowerType(pt, err_union_ty);
@@ -7724,7 +7778,7 @@ pub const FuncGen = struct {
self.maybeMarkAllowZeroAccess(err_union_ptr_ty.ptrInfo(zcu));
const err_int_ty = try pt.errorIntType();
const error_alignment = err_int_ty.abiAlignment(zcu).toLlvm();
const error_alignment = err_int_ty.abiAlignment(zcu).minStrict(err_union_ptr_align).toLlvm();
const error_offset = try errUnionErrorOffset(payload_ty, pt);
// First set the non-error value.
const non_null_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, error_offset, "");
@@ -8405,9 +8459,8 @@ pub const FuncGen = struct {
_ = try self.wip.store(.normal, result_val, field_ptr, result_alignment);
}
{
const overflow_alignment = comptime Builder.Alignment.fromByteUnits(1);
const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, overflow_index, "");
_ = try self.wip.store(.normal, overflow_bit, field_ptr, overflow_alignment);
_ = try self.wip.store(.normal, overflow_bit, field_ptr, comptime .fromByteUnits(1));
}
return alloca_inst;
@@ -8767,9 +8820,8 @@ pub const FuncGen = struct {
_ = try self.wip.store(.normal, result, field_ptr, result_alignment);
}
{
const field_alignment = comptime Builder.Alignment.fromByteUnits(1);
const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, overflow_index, "");
_ = try self.wip.store(.normal, overflow_bit, field_ptr, field_alignment);
_ = try self.wip.store(.normal, overflow_bit, field_ptr, comptime .fromByteUnits(1));
}
return alloca_inst;
}
@@ -9925,15 +9977,18 @@ pub const FuncGen = struct {
const union_ptr = try self.resolveInst(bin_op.lhs);
const new_tag = try self.resolveInst(bin_op.rhs);
const union_ptr_align = un_ptr_ty.ptrAlignment(zcu);
if (layout.payload_size == 0) {
// TODO alignment on this store
_ = try self.wip.store(access_kind, new_tag, union_ptr, .default);
_ = try self.wip.store(access_kind, new_tag, union_ptr, union_ptr_align.toLlvm());
return .none;
}
const tag_index = @intFromBool(layout.tag_align.compare(.lt, layout.payload_align));
const tag_field_ptr = try self.wip.gepStruct(try o.lowerType(pt, un_ty), union_ptr, tag_index, "");
// TODO alignment on this store
_ = try self.wip.store(access_kind, new_tag, tag_field_ptr, .default);
const tag_ptr_align: InternPool.Alignment = switch (layout.tagOffset()) {
0 => union_ptr_align,
else => |off| .minStrict(union_ptr_align, .fromLog2Units(@ctz(off))),
};
_ = try self.wip.store(access_kind, new_tag, tag_field_ptr, tag_ptr_align.toLlvm());
return .none;
}
@@ -10106,56 +10161,19 @@ pub const FuncGen = struct {
const pt = self.ng.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes (`updateContainerType` will be called) and re-emit this function
const gop = try o.named_enum_map.getOrPut(o.gpa, enum_ty.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(o.named_enum_map.remove(enum_ty.toIntern()));
const target = &zcu.root_mod.resolved_target.result;
const function_index = try o.builder.addFunction(
try o.builder.fnType(.i1, &.{try o.lowerType(pt, Type.fromInterned(enum_type.int_tag_type))}, .normal),
try o.builder.strtabStringFmt("__zig_is_named_enum_value_{f}", .{enum_type.name.fmt(ip)}),
toLlvmAddressSpace(.generic, target),
// Dummy function type; `updateIsNamedEnumValue` will replace it with the correct type.
// TODO: change the builder API so we don't need to do this.
try o.builder.fnType(.void, &.{}, .normal),
try o.builder.strtabStringFmt("__zig_is_named_enum_value_{f}", .{enum_ty.containerTypeName(ip).fmt(ip)}),
toLlvmAddressSpace(.generic, zcu.getTarget()),
);
var attributes: Builder.FunctionAttributes.Wip = .{};
defer attributes.deinit(&o.builder);
try o.addCommonFnAttributes(&attributes, zcu.root_mod, zcu.root_mod.omit_frame_pointer);
function_index.setLinkage(.internal, &o.builder);
function_index.setCallConv(.fastcc, &o.builder);
function_index.setAttributes(try attributes.finish(&o.builder), &o.builder);
gop.value_ptr.* = function_index;
var wip = try Builder.WipFunction.init(&o.builder, .{
.function = function_index,
.strip = true,
});
defer wip.deinit();
wip.cursor = .{ .block = try wip.block(0, "Entry") };
const named_block = try wip.block(@intCast(enum_type.field_names.len), "Named");
const unnamed_block = try wip.block(1, "Unnamed");
const tag_int_value = wip.arg(0);
var wip_switch = try wip.@"switch"(tag_int_value, unnamed_block, @intCast(enum_type.field_names.len), .none);
defer wip_switch.finish(&wip);
for (0..enum_type.field_names.len) |field_index| {
const this_tag_int_value = try o.lowerValue(
pt,
(try pt.enumValueFieldIndex(enum_ty, @intCast(field_index))).toIntern(),
);
try wip_switch.addCase(this_tag_int_value, named_block, &wip);
}
wip.cursor = .{ .block = named_block };
_ = try wip.ret(.true);
wip.cursor = .{ .block = unnamed_block };
_ = try wip.ret(.false);
try wip.finish();
try o.updateIsNamedEnumValueFunction(pt, enum_ty, function_index);
return function_index;
}
@@ -10824,9 +10842,8 @@ pub const FuncGen = struct {
comptime assert(@intFromEnum(std.builtin.PrefetchOptions.Rw.read) == 0);
comptime assert(@intFromEnum(std.builtin.PrefetchOptions.Rw.write) == 1);
// TODO these two asserts should be able to be comptime because the type is a u2
assert(prefetch.locality >= 0);
assert(prefetch.locality <= 3);
comptime assert(prefetch.locality >= 0);
comptime assert(prefetch.locality <= 3);
comptime assert(@intFromEnum(std.builtin.PrefetchOptions.Cache.instruction) == 0);
comptime assert(@intFromEnum(std.builtin.PrefetchOptions.Cache.data) == 1);