change runtime representation of restricted values

Jacob Young
2026-04-14 05:07:11 -04:00
parent 24bf438708
commit 453df509b8
17 changed files with 1573 additions and 816 deletions
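In short: when the backend supports `Zcu.Feature.restricted_types`, a restricted value is now lowered as a 32-bit index into a per-type table of unrestricted values (with the table length emitted alongside it for safety checks), instead of as a pointer into deferred constant data. The following is a minimal stand-alone sketch of that representation; the names (`RestrictedHandle`, `table`, `unwrap`) are illustrative and are not the compiler's internals.

const std = @import("std");

// Hypothetical stand-in for the new runtime representation: a restricted
// value is just a 32-bit index into a per-type table of unrestricted values.
const RestrictedHandle = enum(u32) { _ };

const Color = struct { r: u8, g: u8, b: u8 };

// The per-type table the backends now emit; its length is emitted next to it
// so that the safety-checked unwrap is a single `index < len` comparison.
const table = [_]Color{
    .{ .r = 255, .g = 0, .b = 0 },
    .{ .r = 0, .g = 255, .b = 0 },
};

fn unwrap(handle: RestrictedHandle) Color {
    const index = @intFromEnum(handle);
    // Corresponds to `unwrap_restricted_safe`; the backends call the
    // `corrupt_restricted_value` panic handler here.
    if (index >= table.len) @panic("corrupt restricted value");
    return table[index];
}

pub fn main() void {
    std.debug.print("{any}\n", .{unwrap(@enumFromInt(1))});
}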
+5 -1
@@ -501,7 +501,11 @@ pub fn resolve(options: Options) ResolveError!Config {
};
};
const backend_supports_error_tracing = target_util.backendSupportsFeature(backend, options.incremental, .error_return_trace);
const backend_supports_error_tracing = target_util.backendSupportsFeature(.error_return_trace, .{
.backend = backend,
.incremental = options.incremental,
.use_new_linker = use_new_linker,
});
const root_error_tracing = b: {
if (options.root_error_tracing) |x| break :b x;
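For context, the `target_util.backendSupportsFeature` call-site convention changes from positional arguments to a leading feature plus an options struct. A rough sketch of the new shape; only the field names are taken from the diff, the enum members are placeholders, and the real declaration lives in `target_util`.

// Illustrative only; not the actual support matrix.
const Feature = enum { error_return_trace, restricted_types };
const Backend = enum { stage2_llvm, stage2_c, stage2_x86_64, other };

fn backendSupportsFeature(feature: Feature, opts: struct {
    backend: Backend,
    incremental: bool,
    use_new_linker: bool,
}) bool {
    _ = feature;
    _ = opts;
    return false; // actual per-backend logic omitted
}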
+22 -65
@@ -1017,10 +1017,10 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
.generic_poison => unreachable,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => ptrAbiAlignment(target),
.direct => return abiAlignment(.fromInterned(restricted_type.unrestricted_type), zcu),
},
.restricted_type => |restricted_type| if (zcu.backendSupportsFeature(.restricted_types))
.fromByteUnits(std.zig.target.intAlignment(target, 32))
else
abiAlignment(.fromInterned(restricted_type.unrestricted_type), zcu),
.tuple_type => |tuple| {
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
@@ -1171,10 +1171,10 @@ pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
.anyopaque => unreachable,
.generic_poison => unreachable,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => ptrAbiSize(target),
.direct => return abiSize(.fromInterned(restricted_type.unrestricted_type), zcu),
},
.restricted_type => |restricted_type| if (zcu.backendSupportsFeature(.restricted_types))
std.zig.target.intByteSize(target, 32)
else
abiSize(.fromInterned(restricted_type.unrestricted_type), zcu),
.tuple_type => |tuple| switch (ty.classify(zcu)) {
// `structFieldOffset` is bogus on NPV tuples, because there may be some fields with
// non-zero size.
@@ -1301,10 +1301,10 @@ pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
.generic_poison => unreachable,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => target.ptrBitWidth(),
.direct => return bitSize(.fromInterned(restricted_type.unrestricted_type), zcu),
},
.restricted_type => |restricted_type| if (zcu.backendSupportsFeature(.restricted_types))
32
else
bitSize(.fromInterned(restricted_type.unrestricted_type), zcu),
.struct_type => {
const struct_obj = ip.loadStructType(ty.toIntern());
switch (struct_obj.layout) {
@@ -1362,17 +1362,6 @@ pub fn unrestrictedType(ty: Type, zcu: *const Zcu) ?Type {
};
}
const RestrictedRepr = enum { indirect, direct };
pub fn restrictedRepr(ty: Type, zcu: *const Zcu) RestrictedRepr {
return restrictedReprByTrackedInst(zcu.intern_pool.indexToKey(ty.toIntern()).restricted_type.zir_index, zcu);
}
pub fn restrictedReprByTrackedInst(zir_index: InternPool.TrackedInst.Index, zcu: *const Zcu) RestrictedRepr {
return switch (zcu.fileByIndex(zir_index.resolveFile(&zcu.intern_pool)).mod.?.optimize_mode) {
.Debug, .ReleaseSafe => if (zcu.backendSupportsFeature(.restricted_types)) .indirect else .direct,
.ReleaseFast, .ReleaseSmall => .direct,
};
}
pub fn isSinglePointer(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.flags.size == .one,
@@ -1450,24 +1439,16 @@ pub fn isCPtr(ty: Type, zcu: *const Zcu) bool {
pub fn isPtrAtRuntime(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.slice => false,
.one, .many, .c => true,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :ty ip.indexToKey(restricted_type.unrestricted_type),
},
.opt_type => |child| opt_child: switch (ip.indexToKey(child)) {
.opt_type => |child| switch (ip.indexToKey(child)) {
.ptr_type => |p| switch (p.flags.size) {
.slice, .c => false,
.many, .one => !p.flags.is_allowzero,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :opt_child ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
},
else => false,
@@ -1483,21 +1464,13 @@ pub fn ptrAllowsZero(ty: Type, zcu: *const Zcu) bool {
/// See also `isPtrLikeOptional`.
pub fn optionalReprIsPayload(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
.opt_type => |opt_child_type| opt_child_type == .anyerror_type or opt_child: switch (ip.indexToKey(opt_child_type)) {
.opt_type => |opt_child_type| opt_child_type == .anyerror_type or switch (ip.indexToKey(opt_child_type)) {
.ptr_type => |ptr_type| ptr_type.flags.size != .c and !ptr_type.flags.is_allowzero,
.error_set_type, .inferred_error_set_type => true,
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :opt_child ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => false,
.direct => continue :ty ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
};
}
@@ -1506,23 +1479,15 @@ pub fn optionalReprIsPayload(ty: Type, zcu: *const Zcu) bool {
/// address value, using 0 for null. Note that this returns true for C pointers.
pub fn isPtrLikeOptional(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
.opt_type => |opt_child_type| opt_child: switch (ip.indexToKey(opt_child_type)) {
.opt_type => |opt_child_type| switch (ip.indexToKey(opt_child_type)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.slice, .c => false,
.many, .one => !ptr_type.flags.is_allowzero,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :opt_child ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => false,
.direct => continue :ty ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
};
}
@@ -2056,20 +2021,12 @@ pub fn fnCallingConvention(ty: Type, zcu: *const Zcu) std.builtin.CallingConvent
return zcu.intern_pool.indexToKey(ty.toIntern()).func_type.cc;
}
pub fn isValidParamType(self: Type, zcu: *const Zcu) bool {
if (self.toIntern() == .generic_poison_type) return true;
return switch (self.zigTypeTag(zcu)) {
.@"opaque", .noreturn => false,
else => true,
};
pub fn isValidParamType(ty: Type, zcu: *const Zcu) bool {
return ty.toIntern() == .noreturn_type or ty.isValidReturnType(zcu);
}
pub fn isValidReturnType(self: Type, zcu: *const Zcu) bool {
if (self.toIntern() == .generic_poison_type) return true;
return switch (self.zigTypeTag(zcu)) {
.@"opaque" => false,
else => true,
};
pub fn isValidReturnType(ty: Type, zcu: *const Zcu) bool {
return ty.toIntern() == .generic_poison_type or !zcu.intern_pool.isOpaqueType(ty.toIntern());
}
/// Asserts the type is a function.
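The ABI queries above now make the restricted layout depend only on backend support rather than on the module's optimize mode: with support, a restricted type is laid out as a 32-bit integer (via `std.zig.target.intByteSize`/`intAlignment`); without it, it keeps the unrestricted type's layout. A small hedged model of that rule:

// Hypothetical model of the layout rule above; the real code queries the
// target's 32-bit integer layout instead of hard-coding 4 bytes.
const Layout = struct { size: u64, alignment: u64, bit_size: u64 };

fn restrictedLayout(backend_supports_restricted_types: bool, unrestricted: Layout) Layout {
    if (backend_supports_restricted_types) {
        return .{ .size = 4, .alignment = 4, .bit_size = 32 };
    }
    return unrestricted; // fallback: identical to the unrestricted type
}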
+5 -1
@@ -4001,7 +4001,11 @@ pub const Feature = enum {
pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
return target_util.backendSupportsFeature(backend, zcu.comp.config.incremental, feature);
return target_util.backendSupportsFeature(feature, .{
.backend = backend,
.incremental = zcu.comp.config.incremental,
.use_new_linker = zcu.comp.config.use_new_linker,
});
}
pub const AtomicPtrAlignmentError = error{
+40 -52
@@ -233,6 +233,7 @@ const LazySymbolStructure = struct {
pub const Operation = enum {
end_ptr_inc,
append_restricted,
pub fn apply(operation: Operation, slice: []u8, target_opts: struct {
ptr_bit_width: u16,
@@ -253,6 +254,7 @@ const LazySymbolStructure = struct {
);
},
},
.append_restricted => unreachable,
}
}
};
@@ -290,16 +292,6 @@ pub fn getLazySymbolInfo(
.structure => .{},
.attributes => .{ .required_alignment = .@"1" },
},
.restricted_type => |restricted_type| switch (kind) {
.structure => .{},
.attributes => {
const restricted_ty: Type = .fromInterned(lazy_sym.key);
const unrestricted_ty: Type =
.fromInterned(restricted_type.unrestricted_type);
return .{ .required_alignment = restricted_ty.abiAlignment(zcu)
.maxStrict(unrestricted_ty.abiAlignment(zcu)) };
},
},
},
.deferred_const_data => switch (lazy_sym.key) {
else => unreachable,
@@ -309,33 +301,14 @@ pub fn getLazySymbolInfo(
},
_ => switch (ip.indexToKey(lazy_sym.key)) {
else => unreachable,
.restricted_value => |restricted_value| switch (kind) {
.structure => .{ .parent = .{ .kind = .const_data, .key = restricted_value.ty }, .modify = .{
.lazy_sym = .{ .kind = .deferred_const_data, .key = restricted_value.ty },
.operation = .end_ptr_inc,
} },
.attributes => {
const unrestricted_ty: Type = .fromInterned(
ip.indexToKey(restricted_value.ty).restricted_type.unrestricted_type,
);
return .{
.required_alignment = unrestricted_ty.abiAlignment(zcu),
.size = unrestricted_ty.abiSize(zcu),
};
},
},
.restricted_type => switch (kind) {
.structure => .{ .parent = .{ .kind = .const_data, .key = lazy_sym.key } },
.restricted_type => |restricted_type| switch (kind) {
.structure => .{},
.attributes => {
const restricted_ty: Type = .fromInterned(lazy_sym.key);
const unrestricted_ty: Type = .fromInterned(
ip.indexToKey(lazy_sym.key).restricted_type.unrestricted_type,
);
return .{
.header = true,
.required_alignment = restricted_ty.abiAlignment(zcu),
.size = unrestricted_ty.abiAlignment(zcu).forward(restricted_ty.abiSize(zcu)),
};
const unrestricted_ty: Type =
.fromInterned(restricted_type.unrestricted_type);
return .{ .required_alignment = restricted_ty.abiAlignment(zcu)
.maxStrict(unrestricted_ty.abiAlignment(zcu)) };
},
},
},
@@ -379,7 +352,6 @@ pub fn generateLazySymbol(
}
return;
},
.restricted_type => return,
else => {},
},
.deferred_const_data => switch (lazy_sym.key) {
@@ -404,10 +376,22 @@ pub fn generateLazySymbol(
return;
},
_ => switch (ip.indexToKey(lazy_sym.key)) {
.restricted_value => |restricted_value| return generateSymbol(bin_file, pt, src_loc, .fromInterned(
restricted_value.unrestricted_value,
), w, reloc_parent),
.restricted_type => return w.splatByteAll(0, @divExact(zcu.getTarget().ptrBitWidth(), 8)),
.restricted_type => |restricted_type| {
const restricted_ty: Type = .fromInterned(lazy_sym.key);
const unrestricted_ty: Type = .fromInterned(restricted_type.unrestricted_type);
const values: *const std.array_hash_map.Auto(InternPool.Index, void) =
bin_file.restricted.getPtr(lazy_sym.key) orelse &.empty;
const values_len = values.count();
const len_size = restricted_ty.abiSize(zcu);
const array_start = unrestricted_ty.abiAlignment(zcu).forward(len_size);
try w.rebase(w.end, array_start + unrestricted_ty.abiSize(zcu) * values_len);
w.writeInt(u32, @intCast(values_len), endian) catch unreachable;
w.splatByteAll(0, array_start - len_size) catch unreachable;
for (values.keys()) |value| try generateSymbol(bin_file, pt, src_loc, .fromInterned(
ip.indexToKey(value).restricted_value.unrestricted_value,
), w, reloc_parent);
return;
},
else => {},
},
else => {},
@@ -782,10 +766,13 @@ pub fn generateSymbol(
}
},
.bitpack => |bitpack| try generateSymbol(bin_file, pt, src_loc, .fromInterned(bitpack.backing_int_val), w, reloc_parent),
.restricted_value => |restricted_value| switch (ty.restrictedRepr(zcu)) {
.indirect => try lowerLazySymbolRef(bin_file, pt, .{ .kind = .deferred_const_data, .key = val.toIntern() }, w, reloc_parent, 0),
.direct => try generateSymbol(bin_file, pt, src_loc, .fromInterned(restricted_value.unrestricted_value), w, reloc_parent),
},
.restricted_value => |restricted_value| if (zcu.backendSupportsFeature(.restricted_types)) {
const gpa = zcu.gpa;
const type_gop = try bin_file.restricted.getOrPut(gpa, restricted_value.ty);
if (!type_gop.found_existing) type_gop.value_ptr.* = .empty;
const value_gop = try type_gop.value_ptr.getOrPut(gpa, val.toIntern());
try w.writeInt(u32, @intCast(value_gop.index), endian);
} else try generateSymbol(bin_file, pt, src_loc, .fromInterned(restricted_value.unrestricted_value), w, reloc_parent),
.memoized_call => unreachable,
}
}
@@ -1167,7 +1154,6 @@ pub fn genTypedValue(
} },
.fail => |em| .{ .fail = em },
},
.lea_lazy_sym => unreachable, // `Zcu.Feature.restricted_types` is not supported by this code path
};
}
@@ -1180,7 +1166,6 @@ const LowerResult = union(enum) {
lea_nav: InternPool.Nav.Index,
load_uav: InternPool.Key.Ptr.BaseAddr.Uav,
lea_uav: InternPool.Key.Ptr.BaseAddr.Uav,
lea_lazy_sym: link.File.LazySymbol,
};
pub fn lowerValue(pt: Zcu.PerThread, start_val: Value, target: *const std.Target) Allocator.Error!LowerResult {
@@ -1192,12 +1177,15 @@ pub fn lowerValue(pt: Zcu.PerThread, start_val: Value, target: *const std.Target
if (start_val.isUndef(zcu)) return .undef;
const ty, const val: Value = if (start_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (start_ty.restrictedRepr(zcu)) {
.indirect => return .{ .lea_lazy_sym = .{ .kind = .deferred_const_data, .key = start_val.toIntern() } },
.direct => .{ unrestricted_ty, .fromInterned(ip.indexToKey(start_val.toIntern()).restricted_value.unrestricted_value) },
} else .{ start_ty, start_val };
const ty, const val: Value, const use_uav = if (start_ty.unrestrictedType(zcu)) |unrestricted_ty|
if (zcu.backendSupportsFeature(.restricted_types))
.{ start_ty, start_val, true }
else
.{ unrestricted_ty, .fromInterned(ip.indexToKey(start_val.toIntern()).restricted_value.unrestricted_value), false }
else
.{ start_ty, start_val, false };
switch (ty.zigTypeTag(zcu)) {
if (!use_uav) switch (ty.zigTypeTag(zcu)) {
.void => return .none,
.bool => return .{ .immediate = @intFromBool(val.toBool()) },
.pointer => switch (ty.ptrSize(zcu)) {
@@ -1308,7 +1296,7 @@ pub fn lowerValue(pt: Zcu.PerThread, start_val: Value, target: *const std.Target
.@"opaque" => unreachable,
else => {},
}
};
return .{ .load_uav = .{
.val = val.toIntern(),
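`generateLazySymbol` now emits a small blob per restricted type: a `u32` count of the values collected into `bin_file.restricted`, padding up to the unrestricted type's alignment, then each unrestricted value in index order. A self-contained sketch of that size calculation, assuming the count occupies 4 bytes as it does on typical targets:

const std = @import("std");

// Layout of the blob sketched above: [len: u32][pad][values...].
fn restrictedBlobSize(values_len: u64, unrestricted_size: u64, unrestricted_align: u64) u64 {
    const len_size: u64 = 4; // abiSize of the restricted (u32-like) type
    const array_start = std.mem.alignForward(u64, len_size, unrestricted_align);
    return array_start + unrestricted_size * values_len;
}

test restrictedBlobSize {
    // 3 values of an 8-byte, 8-aligned unrestricted type:
    // 4-byte len + 4 bytes of padding + 3 * 8 bytes of values.
    try std.testing.expectEqual(@as(u64, 32), restrictedBlobSize(3, 8, 8));
}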
+4 -38
@@ -305,6 +305,8 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
.errunion_payload_ptr_set,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_restricted,
.unwrap_restricted_safe,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
@@ -658,28 +660,6 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
air_inst_index = air_body[air_body_index];
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.unwrap_restricted, .unwrap_restricted_safe => {
const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
maybe_noop: {
switch (isel.air.typeOf(ty_op.operand, ip).restrictedRepr(zcu)) {
.indirect => break :maybe_noop,
.direct => {},
}
if (true) break :maybe_noop;
if (ty_op.operand.toIndex()) |src_air_inst_index| {
if (isel.hints.get(src_air_inst_index)) |hint_vpsi| {
try isel.hints.putNoClobber(gpa, air_inst_index, hint_vpsi);
}
}
}
try isel.analyzeUse(ty_op.operand);
try isel.def_order.putNoClobber(gpa, air_inst_index, {});
air_body_index += 1;
air_inst_index = air_body[air_body_index];
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.struct_field_ptr, .struct_field_val => {
const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -5763,22 +5743,8 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| {
defer dst_vi.value.deref(isel);
const ty_op = air.data(air.inst_index).ty_op;
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = isel.air.typeOf(ty_op.operand, ip);
switch (restricted_ty.restrictedRepr(zcu)) {
.indirect => {
switch (air_tag) {
else => unreachable,
.unwrap_restricted => {},
.unwrap_restricted_safe => {}, // TODO
}
const ptr_vi = try isel.use(ty_op.operand);
const ptr_mat = try ptr_vi.matReg(isel);
_ = try dst_vi.value.load(isel, unrestricted_ty, ptr_mat.ra, .{});
try ptr_mat.finish(isel);
},
.direct => try dst_vi.value.move(isel, ty_op.operand),
}
_ = air_tag; // TODO
try dst_vi.value.move(isel, ty_op.operand);
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
+36 -103
@@ -1306,20 +1306,15 @@ pub const DeclGen = struct {
if (loaded_union.layout == .auto) try w.writeByte('}');
}
},
.restricted_value => |restricted_value| switch (ty.restrictedRepr(zcu)) {
.indirect => {
const loaded_restricted = ip.loadRestrictedType(ty.toIntern());
// Explicitly add the restricted decl dependency on the unrestricted type
_ = try CType.lower(.fromInterned(loaded_restricted.unrestricted_type), &dg.ctype_deps, dg.arena, zcu);
try dg.need_restricted.put(zcu.gpa, val.toIntern(), {});
.restricted_value => {
const loaded_restricted = ip.loadRestrictedType(ty.toIntern());
// Explicitly add the restricted decl dependency on the unrestricted type
_ = try CType.lower(.fromInterned(loaded_restricted.unrestricted_type), &dg.ctype_deps, dg.arena, zcu);
try dg.need_restricted.put(zcu.gpa, val.toIntern(), {});
const restricted_ty_name = loaded_restricted.name.toSlice(ip);
try w.print("&zig_restricted_{f}__{d}[zig_restricted_index_{f}__{d}]", .{
fmtIdentUnsolo(restricted_ty_name), ty.toIntern(),
fmtIdentUnsolo(restricted_ty_name), val.toIntern(),
});
},
.direct => try dg.renderValue(w, .fromInterned(restricted_value.unrestricted_value), initializer_type),
try w.print("zig_restricted_value_{f}__{d}", .{
fmtIdentUnsolo(loaded_restricted.name.toSlice(ip)), val.toIntern(),
});
},
}
}
@@ -1327,7 +1322,7 @@ pub const DeclGen = struct {
fn renderUndefValue(
dg: *DeclGen,
w: *Writer,
start_ty: Type,
ty: Type,
location: ValueRenderLocation,
) Error!void {
const pt = dg.pt;
@@ -1345,8 +1340,7 @@ pub const DeclGen = struct {
.ReleaseFast, .ReleaseSmall => false,
};
var ty = start_ty;
ty: switch (start_ty.toIntern()) {
switch (ty.toIntern()) {
.c_longdouble_type,
.f16_type,
.f32_type,
@@ -1374,12 +1368,13 @@ pub const DeclGen = struct {
return w.writeByte(')');
},
.bool_type => try w.writeAll(if (safety_on) "0xaa" else "false"),
else => ty_key: switch (ip.indexToKey(ty.toIntern())) {
else => switch (ip.indexToKey(ty.toIntern())) {
.simple_type, // anyerror, c_char (etc), usize, isize
.int_type,
.enum_type,
.error_set_type,
.inferred_error_set_type,
.restricted_type,
=> switch (CType.classifyInt(ty, zcu)) {
.void => unreachable, // opv
.small => |s| {
@@ -1475,16 +1470,6 @@ pub const DeclGen = struct {
try w.writeAll(" }");
},
},
.restricted_type => |restricted_type| switch (ty.restrictedRepr(zcu)) {
.indirect => continue :ty_key .{ .ptr_type = .{
.child = restricted_type.unrestricted_type,
.flags = .{ .is_const = true },
} },
.direct => {
ty = .fromInterned(restricted_type.unrestricted_type);
continue :ty restricted_type.unrestricted_type;
},
},
.struct_type => {
const loaded_struct = ip.loadStructType(ty.toIntern());
switch (loaded_struct.layout) {
@@ -2138,8 +2123,8 @@ pub fn genRestricted(
});
for (restricted_vals.keys(), 0..) |restricted_val, restricted_index| {
try w.print(
\\#define zig_restricted_index_{f}__{d} {d}u
\\ [zig_restricted_index_{f}__{d}] =
\\#define zig_restricted_value_{f}__{d} {d}u
\\ [zig_restricted_value_{f}__{d}] =
, .{
fmtIdentUnsolo(restricted_ty_name),
restricted_val,
@@ -5641,81 +5626,29 @@ fn airUnwrapRestricted(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue
// Implicitly adds the restricted decl dependency on the unrestricted type
const local = try f.allocLocal(inst, unrestricted_ty);
switch (restricted_ty.restrictedRepr(zcu)) {
.indirect => {
if (safety) {
const target = &f.dg.mod.resolved_target.result;
const ptr_bits = target.ptrBitWidth();
try f.dg.need_restricted.put(zcu.gpa, restricted_ty.toIntern(), {});
const unrestricted_size = unrestricted_ty.abiSize(zcu);
assert(unrestricted_size > 0);
const restricted_ty_name = ip.loadRestrictedType(restricted_ty.toIntern()).name.toSlice(ip);
const ptr_diff = try f.allocLocal(inst, .usize);
try f.writeCValue(w, ptr_diff, .other);
try w.print(" = zig_subw_u{d}(({f})", .{
ptr_bits,
CType.fmtTypeName(.{ .int = .uintptr_t }, zcu),
});
try f.writeCValue(w, operand, .other);
try w.print(", ({f})zig_restricted_{f}__{d}, {f});", .{
CType.fmtTypeName(.{ .int = .uintptr_t }, zcu),
fmtIdentUnsolo(restricted_ty_name),
restricted_ty.toIntern(),
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits, false, 10, .lower),
});
try f.newline();
try w.writeAll("if (");
if (unrestricted_size == 1) {
try f.writeCValue(w, ptr_diff, .other);
} else if (std.math.isPowerOfTwo(unrestricted_size)) {
const rotate_amount = std.math.log2_int(u64, unrestricted_size);
try w.print("(zig_shr_u{d}(", .{ptr_bits});
try f.writeCValue(w, ptr_diff, .other);
try w.print(", {f}) | zig_shlw_u{d}(", .{
fmtUnsignedIntLiteralSmall(target, .uint8_t, rotate_amount, false, 10, .lower),
ptr_bits,
});
try f.writeCValue(w, ptr_diff, .other);
try w.print(", {f}, {f}))", .{
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits - rotate_amount, false, 10, .lower),
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits, false, 10, .lower),
});
} else {
try f.writeCValue(w, ptr_diff, .other);
try w.print(" % {f} != {f} || ", .{
fmtUnsignedIntLiteralSmall(target, .uintptr_t, unrestricted_size, false, 10, .lower),
fmtUnsignedIntLiteralSmall(target, .uintptr_t, 0, false, 10, .lower),
});
try f.writeCValue(w, ptr_diff, .other);
try w.print(" / {f}", .{
fmtUnsignedIntLiteralSmall(target, .uintptr_t, unrestricted_size, false, 10, .lower),
});
}
try w.print(" >= zig_restricted_len_{f}__{d}) {{", .{
fmtIdentUnsolo(restricted_ty_name),
restricted_ty.toIntern(),
});
f.indent();
try f.newline();
try f.writePanic(.corrupt_restricted_value, w);
try f.outdent();
try w.writeByte('}');
try f.newline();
}
try f.writeCValue(w, local, .other);
try w.writeAll(" = ");
try f.writeCValueDeref(w, operand);
},
.direct => {
try f.writeCValue(w, local, .other);
try w.writeAll(" = ");
try f.writeCValue(w, operand, .other);
},
try f.dg.need_restricted.put(zcu.gpa, restricted_ty.toIntern(), {});
const restricted_ty_name = ip.loadRestrictedType(restricted_ty.toIntern()).name.toSlice(ip);
if (safety) {
try w.writeAll("if (");
try f.writeCValue(w, operand, .other);
try w.print(" >= zig_restricted_len_{f}__{d}) {{", .{
fmtIdentUnsolo(restricted_ty_name),
restricted_ty.toIntern(),
});
f.indent();
try f.newline();
try f.writePanic(.corrupt_restricted_value, w);
try f.outdent();
try w.writeByte('}');
try f.newline();
}
try w.writeByte(';');
try f.writeCValue(w, local, .other);
try w.print(" = zig_restricted_{f}__{d}[", .{
fmtIdentUnsolo(restricted_ty_name),
restricted_ty.toIntern(),
});
try f.writeCValue(w, operand, .other);
try w.writeAll("];");
try f.newline();
return local;
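The C backend's `airUnwrapRestricted` thus reduces to a bounds check against the emitted `zig_restricted_len_*` constant followed by a plain array index into `zig_restricted_*`. A rough sketch of the shape of the generated C, rendered with `std.fmt`; the identifiers and mangling here are illustrative, not the exact scheme:

const std = @import("std");

test "shape of the emitted C for a safety-checked unwrap" {
    var buf: [256]u8 = undefined;
    const emitted = try std.fmt.bufPrint(&buf,
        \\if (t0 >= zig_restricted_len_{s}__{d}) {{ zig_panic(); }}
        \\t1 = zig_restricted_{s}__{d}[t0];
    , .{ "Color", 42, "Color", 42 });
    try std.testing.expect(std.mem.indexOf(u8, emitted, "[t0];") != null);
}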
+31 -46
@@ -129,29 +129,27 @@ pub const CType = union(enum) {
pub fn bits(int: Int, target: *const std.Target) u16 {
return switch (int) {
// zig fmt: off
.char => target.cTypeBitSize(.char),
.char => target.cTypeBitSize(.char),
.@"unsigned short" => target.cTypeBitSize(.ushort),
.@"unsigned int" => target.cTypeBitSize(.uint),
.@"unsigned long" => target.cTypeBitSize(.ulong),
.@"unsigned long long" => target.cTypeBitSize(.ulonglong),
.@"unsigned short" => target.cTypeBitSize(.ushort),
.@"unsigned int" => target.cTypeBitSize(.uint),
.@"unsigned long" => target.cTypeBitSize(.ulong),
.@"unsigned long long" => target.cTypeBitSize(.ulonglong),
.@"signed short" => target.cTypeBitSize(.short),
.@"signed int" => target.cTypeBitSize(.int),
.@"signed long" => target.cTypeBitSize(.long),
.@"signed long long" => target.cTypeBitSize(.longlong),
.@"signed short" => target.cTypeBitSize(.short),
.@"signed int" => target.cTypeBitSize(.int),
.@"signed long" => target.cTypeBitSize(.long),
.@"signed long long" => target.cTypeBitSize(.longlong),
.uintptr_t, .intptr_t => target.ptrBitWidth(),
.uintptr_t, .intptr_t => target.ptrBitWidth(),
.uint8_t, .int8_t => 8,
.uint16_t, .int16_t => 16,
.uint24_t, .int24_t => 24,
.uint32_t, .int32_t => 32,
.uint48_t, .int48_t => 48,
.uint64_t, .int64_t => 64,
.zig_u128, .zig_i128 => 128,
// zig fmt: on
.uint8_t, .int8_t => 8,
.uint16_t, .int16_t => 16,
.uint24_t, .int24_t => 24,
.uint32_t, .int32_t => 32,
.uint48_t, .int48_t => 48,
.uint64_t, .int64_t => 64,
.zig_u128, .zig_i128 => 128,
};
}
};
@@ -239,20 +237,8 @@ pub const CType = union(enum) {
) Allocator.Error!CType {
const gpa = zcu.comp.gpa;
const ip = &zcu.intern_pool;
var cur_ty: Type = if (start_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (start_ty.restrictedRepr(zcu)) {
.indirect => {
const unrestricted_cty = try lowerInner(unrestricted_ty, true, deps, arena, zcu);
const unrestricted_cty_buf = try arena.create(CType);
unrestricted_cty_buf.* = unrestricted_cty;
return .{ .pointer = .{
.@"const" = true,
.@"volatile" = false,
.elem_ty = unrestricted_cty_buf,
.nonstring = unrestricted_cty.isStringElem(),
} };
},
.direct => unrestricted_ty,
} else start_ty;
if (ip.isRestrictedType(start_ty.toIntern())) return .{ .int = .uint32_t };
var cur_ty = start_ty;
while (true) {
switch (cur_ty.zigTypeTag(zcu)) {
.type,
@@ -494,6 +480,7 @@ pub const CType = union(enum) {
/// Asserts that `ty` is an integer, enum, bitpack, or error set.
pub fn classifyInt(ty: Type, zcu: *const Zcu) IntClass {
if (zcu.intern_pool.isRestrictedType(ty.toIntern())) return classifyBitInt(.unsigned, 32, zcu);
const int_ty: Type = switch (ty.zigTypeTag(zcu)) {
.error_set => return classifyBitInt(.unsigned, zcu.errorSetBits(), zcu),
.@"enum" => ty.intTagType(zcu),
@@ -502,22 +489,20 @@ pub const CType = union(enum) {
else => unreachable,
};
switch (int_ty.toIntern()) {
// zig fmt: off
.usize_type => return .{ .small = .uintptr_t },
.isize_type => return .{ .small = .intptr_t },
.usize_type => return .{ .small = .uintptr_t },
.isize_type => return .{ .small = .intptr_t },
.c_char_type => return .{ .small = .char },
.c_char_type => return .{ .small = .char },
.c_short_type => return .{ .small = .@"signed short" },
.c_int_type => return .{ .small = .@"signed int" },
.c_long_type => return .{ .small = .@"signed long" },
.c_longlong_type => return .{ .small = .@"signed long long" },
.c_short_type => return .{ .small = .@"signed short" },
.c_int_type => return .{ .small = .@"signed int" },
.c_long_type => return .{ .small = .@"signed long" },
.c_longlong_type => return .{ .small = .@"signed long long" },
.c_ushort_type => return .{ .small = .@"unsigned short" },
.c_uint_type => return .{ .small = .@"unsigned int" },
.c_ulong_type => return .{ .small = .@"unsigned long" },
.c_ulonglong_type => return .{ .small = .@"unsigned long long" },
// zig fmt: on
.c_ushort_type => return .{ .small = .@"unsigned short" },
.c_uint_type => return .{ .small = .@"unsigned int" },
.c_ulong_type => return .{ .small = .@"unsigned long" },
.c_ulonglong_type => return .{ .small = .@"unsigned long long" },
else => {
const int = ty.intInfo(zcu);
+22 -44
@@ -722,7 +722,7 @@ pub const Object = struct {
gop.value_ptr.* = .{
.len = try o.builder.addVariable(
try o.builder.strtabStringFmt("{s}.len", .{ty_name}),
try o.lowerType(.usize),
.i32,
.default,
),
.array = try o.builder.addVariable(
@@ -734,7 +734,7 @@ pub const Object = struct {
};
gop.value_ptr.len.setLinkage(.private, &o.builder);
gop.value_ptr.len.setMutability(.constant, &o.builder);
gop.value_ptr.len.setAlignment(Type.ptrAbiAlignment(target).toLlvm(), &o.builder);
gop.value_ptr.len.setAlignment(.fromByteUnits(std.zig.target.intAlignment(target, 32)), &o.builder);
gop.value_ptr.len.setUnnamedAddr(.unnamed_addr, &o.builder);
gop.value_ptr.array.setLinkage(.private, &o.builder);
gop.value_ptr.array.setMutability(.constant, &o.builder);
@@ -747,12 +747,9 @@ pub const Object = struct {
fn genRestrictedDecls(o: *Object) Allocator.Error!void {
for (o.restricted_map.values()) |restricted_decls| {
const len = restricted_decls.values.count();
try restricted_decls.len.setInitializer(
try o.builder.intConst(restricted_decls.len.typeOf(&o.builder), len),
&o.builder,
);
try restricted_decls.len.setInitializer(try o.builder.intConst(.i32, len), &o.builder);
try restricted_decls.array.setInitializer(switch (len) {
0 => try o.builder.structConst(try o.builder.structType(.normal, &.{}), &.{}),
0 => try o.builder.zeroInitConst(.i8), // ensure unique address
else => try o.builder.arrayConst(
try o.builder.arrayType(len, restricted_decls.values.values()[0].typeOf(&o.builder)),
restricted_decls.values.values(),
@@ -2027,7 +2024,7 @@ pub const Object = struct {
fn lowerDebugType(
o: *Object,
pt: Zcu.PerThread,
start_ty: Type,
ty: Type,
ty_fwd_ref: Builder.Metadata,
) Allocator.Error!Builder.Metadata {
assert(!o.builder.strip);
@@ -2037,7 +2034,7 @@ pub const Object = struct {
const target = zcu.getTarget();
const ip = &zcu.intern_pool;
const name = try o.builder.metadataStringFmt("{f}", .{start_ty.fmt(pt)});
const name = try o.builder.metadataStringFmt("{f}", .{ty.fmt(pt)});
// lldb cannot handle non-byte-sized types, so in the logic below, bit sizes are padded up.
// For instance, `bool` is considered to be 8 bits, and `u60` is considered to be 64 bits.
@@ -2048,23 +2045,16 @@ pub const Object = struct {
// handling for variants at all, and will never print fields in them, so I opted not to use
// them for now.
const ty = if (start_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (start_ty.restrictedRepr(zcu)) {
.indirect => {
const ptr_size = Type.ptrAbiSize(zcu.getTarget());
const ptr_align = Type.ptrAbiAlignment(zcu.getTarget());
return o.builder.debugPointerType(
name,
null, // file
o.debug_compile_unit.unwrap().?, // scope
0, // line
try o.getDebugType(pt, unrestricted_ty),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
0, // offset
);
},
.direct => unrestricted_ty,
} else start_ty;
if (ip.isRestrictedType(ty.toIntern())) return o.builder.debugTypedefType(
name,
null, // file
o.debug_compile_unit.unwrap().?, // scope
0, // line
try o.getDebugType(pt, .u32),
ty.abiSize(zcu) * 8,
ty.abiAlignment(zcu).toByteUnits().? * 8,
0, // offset
);
switch (ty.zigTypeTag(zcu)) {
.void,
@@ -3223,10 +3213,7 @@ pub const Object = struct {
return o.builder.structType(.normal, fields[0..fields_len]);
},
.simple_type => unreachable,
.restricted_type => |restricted_type| switch (t.restrictedRepr(zcu)) {
.indirect => .ptr,
.direct => try o.lowerType(.fromInterned(restricted_type.unrestricted_type)),
},
.restricted_type => .i32,
.struct_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
@@ -3997,20 +3984,11 @@ pub const Object = struct {
else
union_ty, vals[0..len]);
},
.restricted_value => |restricted_value| switch (ty.restrictedRepr(zcu)) {
.indirect => {
const restricted_decls = try o.getRestrictedDecls(ty);
const gop = try restricted_decls.values.getOrPut(o.gpa, arg_val);
if (!gop.found_existing) gop.value_ptr.* = try o.lowerValue(restricted_value.unrestricted_value);
return o.builder.gepConst(
.inbounds,
gop.value_ptr.typeOf(&o.builder),
restricted_decls.array.toConst(&o.builder),
null,
&.{try o.builder.intConst(.i64, gop.index)},
);
},
.direct => try o.lowerValue(restricted_value.unrestricted_value),
.restricted_value => |restricted_value| {
const restricted_decls = try o.getRestrictedDecls(ty);
const gop = try restricted_decls.values.getOrPut(o.gpa, arg_val);
if (!gop.found_existing) gop.value_ptr.* = try o.lowerValue(restricted_value.unrestricted_value);
return o.builder.intConst(.i32, gop.index);
},
.memoized_call => unreachable,
};
+29 -67
View File
@@ -3258,65 +3258,30 @@ fn airUnwrapRestricted(fg: *FuncGen, inst: Air.Inst.Index, safety: bool) Allocat
const zcu = o.zcu;
const target = zcu.getTarget();
const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = fg.typeOf(ty_op.operand);
const operand = try fg.resolveInst(ty_op.operand);
switch (restricted_ty.restrictedRepr(zcu)) {
.indirect => {
const unrestricted_ty = ty_op.ty.toType();
if (safety) {
const restricted_decls = try o.getRestrictedDecls(restricted_ty);
const llvm_usize_ty = restricted_decls.len.typeOf(&o.builder);
const unrestricted_size = unrestricted_ty.abiSize(zcu);
assert(unrestricted_size > 0);
const array = try o.builder.castConst(.ptrtoint, restricted_decls.array.toConst(&o.builder), llvm_usize_ty);
const ptr_diff = try fg.wip.bin(
.sub,
try fg.wip.cast(.ptrtoint, operand, llvm_usize_ty, "unwrap_restricted.operand_int"),
array.toValue(),
"unwrap_restricted.ptr_diff",
);
const len = try fg.wip.load(
.normal,
llvm_usize_ty,
restricted_decls.len.toValue(&o.builder),
Type.ptrAbiAlignment(target).toLlvm(),
"unwrap_restricted.len",
);
const is_po2_unrestricted_size = std.math.isPowerOfTwo(unrestricted_size);
const check_block = if (is_po2_unrestricted_size) undefined else try fg.wip.block(1, "unwrap_restricted.check");
const invalid_block = try fg.wip.block(if (is_po2_unrestricted_size) 1 else 2, "unwrap_restricted.invalid");
const valid_block = try fg.wip.block(1, "unwrap_restricted.valid");
if (is_po2_unrestricted_size) {
const index = if (unrestricted_size == 1)
ptr_diff
else
try fg.wip.callIntrinsic(.normal, .none, .fshr, &.{llvm_usize_ty}, &.{
ptr_diff,
ptr_diff,
try o.builder.intValue(llvm_usize_ty, std.math.log2_int(u64, unrestricted_size)),
}, "unwrap_restricted.index");
const ok = try fg.wip.icmp(.ult, index, len, "unwrap_restricted.ok");
_ = try fg.wip.brCond(ok, valid_block, invalid_block, .none);
} else {
const unrestricted_size_value = try o.builder.intValue(llvm_usize_ty, unrestricted_size);
const misalignment = try fg.wip.bin(.urem, ptr_diff, unrestricted_size_value, "unwrap_restricted.misalignment");
const misaligned = try fg.wip.icmp(.ne, misalignment, try o.builder.intValue(llvm_usize_ty, 0), "unwrap_restricted.misaligned");
_ = try fg.wip.brCond(misaligned, invalid_block, check_block, .none);
const restricted_decls = try o.getRestrictedDecls(restricted_ty);
if (safety) {
const len = try fg.wip.load(
.normal,
.i32,
restricted_decls.len.toValue(&o.builder),
.fromByteUnits(std.zig.target.intAlignment(target, 32)),
"unwrap_restricted.len",
);
const ok = try fg.wip.icmp(.ult, operand, len, "unwrap_restricted.ok");
const invalid_block = try fg.wip.block(1, "unwrap_restricted.invalid");
const valid_block = try fg.wip.block(1, "unwrap_restricted.valid");
_ = try fg.wip.brCond(ok, valid_block, invalid_block, .none);
fg.wip.cursor = .{ .block = check_block };
const index = try fg.wip.bin(.@"udiv exact", ptr_diff, unrestricted_size_value, "unwrap_restricted.index");
const ok = try fg.wip.icmp(.ult, index, len, "unwrap_restricted.ok");
_ = try fg.wip.brCond(ok, valid_block, invalid_block, .none);
}
fg.wip.cursor = .{ .block = invalid_block };
try fg.buildSimplePanic(.corrupt_restricted_value);
fg.wip.cursor = .{ .block = invalid_block };
try fg.buildSimplePanic(.corrupt_restricted_value);
fg.wip.cursor = .{ .block = valid_block };
}
return fg.load(operand, unrestricted_ty, unrestricted_ty.abiAlignment(zcu).toLlvm(), .normal);
},
.direct => return operand,
fg.wip.cursor = .{ .block = valid_block };
}
const ptr = try fg.ptraddScaled(restricted_decls.array.toValue(&o.builder), operand, unrestricted_ty.abiSize(zcu));
return fg.load(ptr, unrestricted_ty, unrestricted_ty.abiAlignment(zcu).toLlvm(), .normal);
}
fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder.Value {
@@ -6701,7 +6666,7 @@ const ParamTypeIterator = struct {
it.zig_index += 1;
it.llvm_index += 1;
if (ty.isSlice(zcu) or
(ty.zigTypeTag(zcu) == .optional and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu)))
(zcu.intern_pool.isOptionalType(ty.toIntern()) and ty.optionalChild(zcu).isSlice(zcu) and !ty.ptrAllowsZero(zcu)))
{
it.llvm_index += 1;
return .slice;
@@ -7292,11 +7257,8 @@ pub fn buildAllocaInner(
/// This is the one source of truth for whether a type is passed around as an LLVM pointer,
/// or as an LLVM value.
pub fn isByRef(ty: Type, zcu: *const Zcu) bool {
const unrestricted_ty = if (ty.unrestrictedType(zcu)) |unrestricted_ty| switch (ty.restrictedRepr(zcu)) {
.indirect => return false,
.direct => unrestricted_ty,
} else ty;
return switch (unrestricted_ty.zigTypeTag(zcu)) {
if (zcu.intern_pool.isRestrictedType(ty.toIntern())) return false;
return switch (ty.zigTypeTag(zcu)) {
.type,
.comptime_int,
.comptime_float,
@@ -7321,19 +7283,19 @@ pub fn isByRef(ty: Type, zcu: *const Zcu) bool {
.array,
.frame,
=> unrestricted_ty.hasRuntimeBits(zcu),
=> ty.hasRuntimeBits(zcu),
.error_union => unrestricted_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu),
.error_union => ty.errorUnionPayload(zcu).hasRuntimeBits(zcu),
.optional => !unrestricted_ty.optionalReprIsPayload(zcu) and unrestricted_ty.optionalChild(zcu).hasRuntimeBits(zcu),
.optional => !ty.optionalReprIsPayload(zcu) and ty.optionalChild(zcu).hasRuntimeBits(zcu),
.@"struct" => switch (unrestricted_ty.containerLayout(zcu)) {
.@"struct" => switch (ty.containerLayout(zcu)) {
.@"packed" => false,
.auto, .@"extern" => unrestricted_ty.hasRuntimeBits(zcu),
.auto, .@"extern" => ty.hasRuntimeBits(zcu),
},
.@"union" => switch (unrestricted_ty.containerLayout(zcu)) {
.@"union" => switch (ty.containerLayout(zcu)) {
.@"packed" => false,
else => unrestricted_ty.hasRuntimeBits(zcu) and !unrestricted_ty.unionHasAllZeroBitFieldTypes(zcu),
else => ty.hasRuntimeBits(zcu) and !ty.unionHasAllZeroBitFieldTypes(zcu),
},
};
}
+2 -10
@@ -6725,18 +6725,10 @@ fn airErrUnionPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void
}
fn airUnwrapRestricted(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
const zcu = cg.pt.zcu;
const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try cg.resolveInst(ty_op.operand);
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = cg.typeOf(ty_op.operand);
const result = result: switch (restricted_ty.restrictedRepr(zcu)) {
.indirect => {
_ = safety; // TODO
break :result try cg.load(operand, unrestricted_ty, 0);
},
.direct => cg.reuseOperand(ty_op.operand, operand),
};
_ = safety; // TODO
const result = cg.reuseOperand(ty_op.operand, operand); // TODO
return cg.finishAir(inst, result, &.{ty_op.operand});
}
+1315 -339
@@ -103826,314 +103826,1260 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = cg.typeOf(ty_op.operand);
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand}) ++ .{try cg.tempInit(ty_op.ty.toType(), .none)};
const res = res: switch (restricted_ty.restrictedRepr(zcu)) {
.indirect => {
if (zcu.comp.config.use_new_linker) switch (air_tag) {
else => unreachable,
.unwrap_restricted => {},
.unwrap_restricted_safe => cg.select(&.{}, &.{}, &ops, &.{ .{
.required_features = .{ .avx, .bmi2, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._rx, .ro, .tmp2p, .tmp2p, .sa(.src1, .add_log2_size), ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.src1, .add_log2_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .reg = .rax } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .reg = .rdx } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.src1, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._, .div, .tmp3p, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ .@"1:", ._, .call, .tmp5d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, .bmi2, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._rx, .ro, .tmp2p, .tmp2p, .sa(.src1, .add_log2_size), ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.src1, .add_log2_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .reg = .rax } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .reg = .rdx } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.src1, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._, .div, .tmp3p, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ .@"1:", ._, .call, .tmp5d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .bmi2, null, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._rx, .ro, .tmp2p, .tmp2p, .sa(.src1, .add_log2_size), ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.src1, .add_log2_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .reg = .rax } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .reg = .rdx } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.src1, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._, .div, .tmp3p, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ .@"1:", ._, .call, .tmp5d, ._, ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {t} {f} {f} {f}", .{
air_tag,
unrestricted_ty.fmt(pt),
restricted_ty.fmt(pt),
ops[0].tracking(cg),
}),
else => |e| return e,
},
};
break :res try ops[0].load(unrestricted_ty, .{}, cg);
},
.direct => ops[0],
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand}) ++ .{
try cg.tempInit(.usize, .{ .immediate = unrestricted_ty.abiAlignment(zcu).forward(restricted_ty.abiSize(zcu)) }),
};
var res: [1]Temp = undefined;
cg.select(&res, &.{unrestricted_ty}, &ops, switch (air_tag) {
else => unreachable,
.unwrap_restricted => &.{ .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0q, .leasi(.tmp0q, .@"8", .dst0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .src0), ._, ._ },
} },
} },
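// Safe unwrap of a restricted value (descriptive sketch of intent, inferred from
// the instruction templates below): tmp1 holds the address of the deferred
// constant data emitted for this restricted type, tmp0 is derived from it via
// the src1 adjustment, the incoming value in src0 is bounds-checked against the
// limit stored with that data, the `corrupt_restricted_value` panic handler in
// tmp2 is called when the check fails, and otherwise the unrestricted value is
// loaded from the table into dst0.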
.unwrap_restricted_safe => &.{ .{
.required_features = .{ .@"64bit", .avx, null, null },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse, null, null },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .avx, null, null },
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse, null, null },
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leai(.tmp0b, .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .avx, null, null },
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse, null, null },
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .avx, null, null },
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse, null, null },
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .avx, null, null },
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse, null, null },
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .avx, null, null },
.dst_constraints = .{ .{ .int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0q, .leasi(.tmp0q, .@"8", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", .sse, null, null },
.dst_constraints = .{ .{ .int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0q, .leasi(.tmp0q, .@"8", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
.dst_constraints = .{ .{ .int = .qword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .src0d, ._, ._ },
.{ ._, ._, .mov, .dst0q, .leasi(.tmp0q, .@"8", .dst0), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movzx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movzx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movzx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movsx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movsx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movsx, .dst0d, .leai(.tmp0b, .src0), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movzx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .movsx, .dst0d, .leasi(.tmp0w, .@"2", .src0), ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .src0), ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .src0), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_src1), ._, ._ },
.{ ._, ._, .cmp, .src0d, .leaa(.tmp0d, .sub_src1), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp2d, ._, ._, ._ },
.{ .@"0:", ._, .mov, .dst0d, .leasi(.tmp0d, .@"4", .src0), ._, ._ },
} },
} },
}) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {t} {f} {f} {f}", .{
air_tag,
unrestricted_ty.fmt(pt),
restricted_ty.fmt(pt),
ops[0].tracking(cg),
}),
else => |e| return e,
};
for (ops[1..]) |op| try op.die(cg);
try res.finish(inst, &.{ty_op.operand}, ops[0..1], cg);
try res[0].finish(inst, &.{ty_op.operand}, ops[0..1], cg);
},
.struct_field_ptr => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
@@ -104306,7 +105252,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .bt, .src0d, .src1d, ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .int = .byte }, .any },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .simm32, .none } },
},
@@ -104315,16 +105261,34 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .movzx, .dst0d, .mema(.src0b, .add_src0_elem_size_mul_src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .int = .byte }, .any },
.dst_constraints = .{ .{ .unsigned_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .to_gpr, .none } },
},
.dst_temps = .{ .{ .rc = .general_purpose }, .unused },
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .memi(.src0b, .src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .int = .word }, .any },
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .simm32, .none } },
},
.dst_temps = .{ .{ .rc = .general_purpose }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .movsx, .dst0d, .mema(.src0b, .add_src0_elem_size_mul_src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .signed_int = .byte }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .to_gpr, .none } },
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .movsx, .dst0d, .memi(.src0b, .src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .simm32, .none } },
},
@@ -104333,14 +105297,32 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .movzx, .dst0d, .mema(.src0w, .add_src0_elem_size_mul_src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .int = .word }, .any },
.dst_constraints = .{ .{ .unsigned_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .to_gpr, .none } },
},
.dst_temps = .{ .{ .rc = .general_purpose }, .unused },
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .dst0d, .memsi(.src0w, .@"2", .src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .simm32, .none } },
},
.dst_temps = .{ .{ .rc = .general_purpose }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .movsx, .dst0d, .mema(.src0w, .add_src0_elem_size_mul_src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .signed_int = .word }, .any },
.patterns = &.{
.{ .src = .{ .to_mem, .to_gpr, .none } },
},
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .movsx, .dst0d, .memsi(.src0w, .@"2", .src1), ._, ._ },
} },
}, .{
.dst_constraints = .{ .{ .int = .dword }, .any },
.patterns = &.{
@@ -104355,7 +105337,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.patterns = &.{
.{ .src = .{ .to_mem, .to_gpr, .none } },
},
.dst_temps = .{ .{ .rc = .general_purpose }, .unused },
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .mov, .dst0d, .memsi(.src0d, .@"4", .src1), ._, ._ },
} },
@@ -104375,7 +105357,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.patterns = &.{
.{ .src = .{ .to_mem, .to_gpr, .none } },
},
.dst_temps = .{ .{ .rc = .general_purpose }, .unused },
.dst_temps = .{ .{ .mut_rc = .{ .ref = .src1, .rc = .general_purpose } }, .unused },
.each = .{ .once = &.{
.{ ._, ._, .mov, .dst0q, .memsi(.src0q, .@"8", .src1), ._, ._ },
} },
@@ -174389,28 +175371,26 @@ fn allocRegOrMemAdvanced(self: *CodeGen, ty: Type, inst: ?Air.Inst.Index, reg_ok
if (reg_ok) need_mem: {
if (!std.math.isPowerOfTwo(abi_size)) break :need_mem;
const unrestricted_ty: Type = if (ty.unrestrictedType(zcu)) |unrestricted_ty| switch (ty.restrictedRepr(zcu)) {
.indirect => .usize,
.direct => unrestricted_ty,
} else ty;
if (abi_size <= @as(u32, max_abi_size: switch (unrestricted_ty.zigTypeTag(zcu)) {
.float => switch (ty.floatBits(self.target)) {
16, 32, 64, 128 => 16,
80 => break :need_mem,
else => unreachable,
},
.vector => {
const elem_ty = ty.childType(zcu);
break :max_abi_size if (elem_ty.toIntern() == .bool_type)
8
else if (self.floatBits(elem_ty)) |float_bits| switch (float_bits) {
16, 32, 64, 128 => self.vectorSize(.float),
if (zcu.intern_pool.isRestrictedType(ty.toIntern()) or
abi_size <= @as(u32, max_abi_size: switch (ty.zigTypeTag(zcu)) {
.float => switch (ty.floatBits(self.target)) {
16, 32, 64, 128 => 16,
80 => break :need_mem,
else => unreachable,
} else self.vectorSize(.int);
},
else => 8,
})) {
},
.vector => {
const elem_ty = ty.childType(zcu);
break :max_abi_size if (elem_ty.toIntern() == .bool_type)
8
else if (self.floatBits(elem_ty)) |float_bits| switch (float_bits) {
16, 32, 64, 128 => self.vectorSize(.float),
80 => break :need_mem,
else => unreachable,
} else self.vectorSize(.int);
},
else => 8,
}))
{
if (self.register_manager.tryAllocReg(inst, self.regSetForType(ty))) |reg| {
return MCValue{ .register = registerAlias(reg, abi_size) };
}
@@ -181410,7 +182390,6 @@ fn lowerValue(cg: *CodeGen, val: Value) Allocator.Error!MCValue {
.lea_nav => |nav| .{ .lea_nav = nav },
.lea_uav => |uav| .{ .lea_uav = uav },
.load_uav => |uav| .{ .load_uav = uav },
.lea_lazy_sym => |lazy_sym| .{ .lea_lazy_sym = lazy_sym },
};
}
@@ -189344,7 +190323,6 @@ const Select = struct {
ptr_size,
ptr_bit_size,
size,
log2_size,
src0_size,
dst0_size,
delta_size,
@@ -189379,7 +190357,6 @@ const Select = struct {
rhs: Memory.Scale,
const none: Adjust = .{ .sign = .pos, .lhs = .none, .op = .mul, .rhs = .@"1" };
const add_ptr_size: Adjust = .{ .sign = .pos, .lhs = .ptr_size, .op = .mul, .rhs = .@"1" };
const sub_ptr_size: Adjust = .{ .sign = .neg, .lhs = .ptr_size, .op = .mul, .rhs = .@"1" };
const add_ptr_bit_size: Adjust = .{ .sign = .pos, .lhs = .ptr_bit_size, .op = .mul, .rhs = .@"1" };
const add_size: Adjust = .{ .sign = .pos, .lhs = .size, .op = .mul, .rhs = .@"1" };
@@ -189388,7 +190365,6 @@ const Select = struct {
const sub_size_div_8: Adjust = .{ .sign = .neg, .lhs = .size, .op = .div, .rhs = .@"8" };
const sub_size_div_4: Adjust = .{ .sign = .neg, .lhs = .size, .op = .div, .rhs = .@"4" };
const sub_size: Adjust = .{ .sign = .neg, .lhs = .size, .op = .mul, .rhs = .@"1" };
const add_log2_size: Adjust = .{ .sign = .pos, .lhs = .log2_size, .op = .mul, .rhs = .@"1" };
const sub_src0_size_div_8: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .div, .rhs = .@"8" };
const sub_src0_size: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
@@ -189449,6 +190425,7 @@ const Select = struct {
const add_src1: Adjust = .{ .sign = .pos, .lhs = .src1, .op = .mul, .rhs = .@"1" };
const add_src1_rem_32: Adjust = .{ .sign = .pos, .lhs = .src1, .op = .rem_8_mul, .rhs = .@"4" };
const add_src1_rem_64: Adjust = .{ .sign = .pos, .lhs = .src1, .op = .rem_8_mul, .rhs = .@"8" };
const sub_src1: Adjust = .{ .sign = .neg, .lhs = .src1, .op = .mul, .rhs = .@"1" };
const add_src1_sub_bit_size: Adjust = .{ .sign = .pos, .lhs = .src1_sub_bit_size, .op = .mul, .rhs = .@"1" };
const add_log2_src0_elem_size: Adjust = .{ .sign = .pos, .lhs = .log2_src0_elem_size, .op = .mul, .rhs = .@"1" };
const elem_mask: Adjust = .{ .sign = .pos, .lhs = .elem_mask, .op = .mul, .rhs = .@"1" };
@@ -190323,7 +191300,6 @@ const Select = struct {
.none => 0,
.ptr_size => @divExact(s.cg.target.ptrBitWidth(), 8),
.ptr_bit_size => s.cg.target.ptrBitWidth(),
.log2_size => std.math.log2_int_ceil(u64, op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.size => @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.src0_size => @intCast(Select.Operand.Ref.src0.typeOf(s).abiSize(s.cg.pt.zcu)),
.dst0_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).abiSize(s.cg.pt.zcu)),
+3 -3
@@ -1877,15 +1877,15 @@ pub const Memory = struct {
.mod = mem.mod,
.size = switch (mem.mod) {
.rm => |rm| rm.size,
.off => undefined,
.off => .none,
},
.index = switch (mem.mod) {
.rm => |rm| rm.index,
.off => undefined,
.off => .none,
},
.scale = switch (mem.mod) {
.rm => |rm| rm.scale,
.off => undefined,
.off => .@"1",
},
},
.base = switch (mem.base) {
+6
@@ -412,6 +412,8 @@ pub const File = struct {
lock: ?Cache.Lock = null,
child_pid: ?std.process.Child.Id = null,
restricted: std.array_hash_map.Auto(InternPool.Index, std.array_hash_map.Auto(InternPool.Index, void)) = .empty,
pub const OpenOptions = struct {
symbol_count_hint: u64 = 32,
program_code_size_hint: u64 = 256 * 1024,
@@ -894,6 +896,10 @@ pub const File = struct {
}
pub fn destroy(base: *File) void {
const gpa = base.comp.gpa;
for (base.restricted.values()) |*value| value.deinit(gpa);
base.restricted.deinit(gpa);
const io = base.comp.io;
base.releaseLock();
if (base.file) |f| f.close(io);
+1
@@ -1912,6 +1912,7 @@ fn flushUav(
try coff.nodes.ensureUnusedCapacity(gpa, 1);
const sym = si.get(coff);
const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
.size = Type.fromInterned(zcu.intern_pool.typeOf(uav_val)).abiSize(zcu),
.alignment = uav_align.toStdMem(),
.moved = true,
});
+36 -34
@@ -3510,13 +3510,9 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
if (value_index == .anyerror_type) return; // handled in `flush` instead
const value_ip_key: InternPool.Key = switch (ip.indexToKey(value_index)) {
const value_ip_key = switch (ip.indexToKey(value_index)) {
.func => return, // populated by the Nav instead (`updateComptimeNav` or `initWipNav`)
.@"extern" => return, // populated by the Nav instead (`initWipNav`)
.restricted_value => |restricted_value| switch (Type.restrictedRepr(.fromInterned(restricted_value.ty), zcu)) {
.indirect => .{ .restricted_value = restricted_value },
.direct => ip.indexToKey(restricted_value.unrestricted_value),
},
else => |key| key,
};
@@ -3845,26 +3841,20 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
.adhoc_inferred_error_set => unreachable,
},
.restricted_type => |restricted_type| {
const repr = Type.restrictedReprByTrackedInst(restricted_type.zir_index, zcu);
try wip_nav.abbrevCode(switch (repr) {
.indirect => .ptr_type,
.direct => .alias_type,
});
try wip_nav.abbrevCode(.generated_struct_type);
try wip_nav.strpFmt("{f}", .{val.toType().fmt(pt)});
switch (repr) {
.indirect => {
try diw.writeByte(@intFromEnum(InternPool.Key.PtrType.AddressSpace.generic));
try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(diw.end + dwarf.sectionOffsetBytes()),
);
try wip_nav.abbrevCode(.is_const);
},
.direct => {},
try diw.writeUleb128(val.toType().abiSize(zcu));
try diw.writeUleb128(val.toType().abiAlignment(zcu).toByteUnits().?);
{
try wip_nav.abbrevCode(.generated_field);
try wip_nav.strp("value");
try wip_nav.refType(if (zcu.backendSupportsFeature(.restricted_types))
.u32
else
.fromInterned(restricted_type.unrestricted_type));
try diw.writeUleb128(0);
}
try wip_nav.refType(.fromInterned(restricted_type.unrestricted_type));
try diw.writeUleb128(@intFromEnum(AbbrevCode.null));
},
.tuple_type => |tuple_type| if (tuple_type.types.len == 0) {
try wip_nav.abbrevCode(.generated_empty_struct_type);
@@ -4645,7 +4635,7 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
.un => |un| {
try wip_nav.abbrevCode(.aggregate_comptime_value);
try wip_nav.refType(.fromInterned(un.ty));
field: {
{
const loaded_union_type = ip.loadUnionType(un.ty);
assert(loaded_union_type.layout == .auto);
const field_index = zcu.unionTagFieldIndex(loaded_union_type, Value.fromInterned(un.tag)).?;
@@ -4658,23 +4648,35 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
else if (has_runtime_bits)
.comptime_value_field_runtime_bits
else
break :field);
.field);
try wip_nav.strp(field_name.toSlice(ip));
if (has_comptime_state)
try wip_nav.refValue(.fromInterned(un.val))
else
else if (has_runtime_bits)
try wip_nav.blockValue(src_loc, .fromInterned(un.val));
}
try diw.writeUleb128(@intFromEnum(AbbrevCode.null));
},
.restricted_value => |restricted_value| { // repr checked above
try wip_nav.abbrevCode(.location_comptime_value);
const unrestricted_unit, const unrestricted_entry =
try wip_nav.getValueEntry(.fromInterned(restricted_value.unrestricted_value));
try wip_nav.infoExprLoc(.{ .implicit_pointer = .{
.unit = unrestricted_unit,
.entry = unrestricted_entry,
} });
.restricted_value => |restricted_value| {
try wip_nav.abbrevCode(.aggregate_comptime_value);
try wip_nav.refType(.fromInterned(restricted_value.ty));
field: {
const unrestricted_ty: Type = .fromInterned(ip.typeOf(restricted_value.unrestricted_value));
const has_runtime_bits = unrestricted_ty.hasRuntimeBits(zcu);
const has_comptime_state = unrestricted_ty.comptimeOnly(zcu);
try wip_nav.abbrevCode(if (has_comptime_state)
.comptime_value_field_comptime_state
else if (has_runtime_bits)
.comptime_value_field_runtime_bits
else
break :field);
try wip_nav.strp("value");
if (has_comptime_state)
try wip_nav.refValue(.fromInterned(restricted_value.unrestricted_value))
else if (has_runtime_bits)
try wip_nav.blockValue(src_loc, .fromInterned(restricted_value.unrestricted_value));
}
try diw.writeUleb128(@intFromEnum(AbbrevCode.null));
},
.memoized_call => unreachable, // not a value
}
+2 -4
@@ -2981,10 +2981,7 @@ pub fn lowerUav(
if (gop.found_existing) {
gop.value_ptr.alignment = gop.value_ptr.alignment.max(uav_align);
} else {
gop.value_ptr.* = .{
.alignment = uav_align,
.src_loc = src_loc,
};
gop.value_ptr.* = .{ .alignment = uav_align, .src_loc = src_loc };
elf.const_prog_node.increaseEstimatedTotalItems(1);
}
}
@@ -3245,6 +3242,7 @@ fn flushUav(
try elf.nodes.ensureUnusedCapacity(gpa, 1);
const sec_si = elf.si.data;
const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{
.size = Type.fromInterned(zcu.intern_pool.typeOf(uav_val)).abiSize(zcu),
.alignment = uav_align.toStdMem(),
.moved = true,
});
+14 -9
@@ -908,9 +908,13 @@ pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.Compile
};
}
pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, incremental: bool, comptime feature: Feature) bool {
pub inline fn backendSupportsFeature(comptime feature: Feature, opts: struct {
backend: std.builtin.CompilerBackend,
incremental: bool,
use_new_linker: bool,
}) bool {
return switch (feature) {
.panic_fn => switch (backend) {
.panic_fn => switch (opts.backend) {
.stage2_aarch64,
.stage2_c,
.stage2_llvm,
@@ -920,23 +924,23 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, incre
=> true,
else => false,
},
.error_return_trace => switch (backend) {
.error_return_trace => switch (opts.backend) {
.stage2_llvm, .stage2_x86_64 => true,
else => false,
},
.is_named_enum_value => switch (backend) {
.is_named_enum_value => switch (opts.backend) {
.stage2_llvm, .stage2_x86_64 => true,
else => false,
},
.error_set_has_value => switch (backend) {
.error_set_has_value => switch (opts.backend) {
.stage2_llvm, .stage2_wasm, .stage2_x86_64 => true,
else => false,
},
.field_reordering => switch (backend) {
.field_reordering => switch (opts.backend) {
.stage2_aarch64, .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
else => false,
},
.separate_thread => switch (backend) {
.separate_thread => switch (opts.backend) {
// Supports a separate thread but does not support N separate
// threads because they would all just be locking the same mutex to
// protect Builder.
@@ -948,9 +952,10 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, incre
// being run in a separate thread from now on.
else => true,
},
.restricted_types => switch (backend) {
.restricted_types => switch (opts.backend) {
.stage2_c => true,
.stage2_llvm, .stage2_x86_64 => !incremental,
.stage2_llvm => !opts.incremental,
.stage2_x86_64 => !opts.incremental and opts.use_new_linker,
else => false,
},
};
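As a hedged illustration of the options-struct form of backendSupportsFeature shown above (this snippet is not part of the commit; the import alias and the locals `backend`, `incremental`, and `use_new_linker` are assumptions standing in for values the caller already has):

const target_util = @import("target.zig"); // assumed import alias for this module
const supports_restricted = target_util.backendSupportsFeature(.restricted_types, .{
    .backend = backend, // std.builtin.CompilerBackend chosen by the caller
    .incremental = incremental,
    .use_new_linker = use_new_linker,
});
// Per the switch above, this is true for stage2_c, true for stage2_llvm only
// outside incremental mode, and true for stage2_x86_64 only outside incremental
// mode when the new linker is in use.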