mirror of
https://codeberg.org/ziglang/zig.git
synced 2026-04-30 14:52:41 +03:00
bcb1a6bdf3
Introduces a small abstraction, `link.DebugConstPool`, to deal with lowering type/value information into debug info when it may not be known until type resolution (which in some cases will *never* happen). It is currently only used by self-hosted DWARF logic, but it will also be of use to the LLVM backend (which is my next focus).
1182 lines
48 KiB
Zig
1182 lines
48 KiB
Zig
const std = @import("std");
|
|
const build_options = @import("build_options");
|
|
const builtin = @import("builtin");
|
|
const assert = std.debug.assert;
|
|
const link = @import("link.zig");
|
|
const log = std.log.scoped(.codegen);
|
|
const mem = std.mem;
|
|
const math = std.math;
|
|
const target_util = @import("target.zig");
|
|
const trace = @import("tracy.zig").trace;
|
|
|
|
const Air = @import("Air.zig");
|
|
const Allocator = mem.Allocator;
|
|
const Compilation = @import("Compilation.zig");
|
|
const ErrorMsg = Zcu.ErrorMsg;
|
|
const InternPool = @import("InternPool.zig");
|
|
const Zcu = @import("Zcu.zig");
|
|
|
|
const Type = @import("Type.zig");
|
|
const Value = @import("Value.zig");
|
|
const Zir = std.zig.Zir;
|
|
const Alignment = InternPool.Alignment;
|
|
const dev = @import("dev.zig");
|
|
|
|
pub const aarch64 = @import("codegen/aarch64.zig");
|
|
|
|
/// Error set shared by all code generation entry points in this file.
/// Extends `GenerateSymbolError` (OOM / overflow / relocation problems) with a
/// marker for errors that have already been reported to the user.
pub const CodeGenError = GenerateSymbolError || error{
    /// Indicates the error is already stored in Zcu `failed_codegen`.
    CodegenFail,
};
|
|
|
|
/// Maps a self-hosted compiler backend tag to the `dev.Feature` that gates its
/// availability in the current compiler build. Backends that are never selected
/// through this path (`.other`, `.stage1`, `.stage2_powerpc`) are unreachable.
fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
    switch (backend) {
        .stage2_aarch64 => return .aarch64_backend,
        .stage2_arm => return .arm_backend,
        .stage2_c => return .c_backend,
        .stage2_llvm => return .llvm_backend,
        .stage2_riscv64 => return .riscv64_backend,
        .stage2_sparc64 => return .sparc64_backend,
        .stage2_spirv => return .spirv_backend,
        .stage2_wasm => return .wasm_backend,
        .stage2_x86 => return .x86_backend,
        .stage2_x86_64 => return .x86_64_backend,
        .other, .stage1, .stage2_powerpc => unreachable,
        _ => unreachable,
    }
}
|
|
|
|
/// Resolves, at comptime, the namespace containing the codegen implementation
/// for `backend`. Only backends with a self-hosted implementation in this
/// repository are valid; all others are unreachable.
fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
    switch (backend) {
        .stage2_aarch64 => return aarch64,
        .stage2_c => return @import("codegen/c.zig"),
        .stage2_llvm => return @import("codegen/llvm.zig"),
        .stage2_riscv64 => return @import("codegen/riscv64/CodeGen.zig"),
        .stage2_sparc64 => return @import("codegen/sparc64/CodeGen.zig"),
        .stage2_spirv => return @import("codegen/spirv/CodeGen.zig"),
        .stage2_wasm => return @import("codegen/wasm/CodeGen.zig"),
        // 32-bit x86 shares the x86_64 code generator.
        .stage2_x86, .stage2_x86_64 => return @import("codegen/x86_64/CodeGen.zig"),
        .other, .stage1, .stage2_arm, .stage2_powerpc => unreachable,
        _ => unreachable,
    }
}
|
|
|
|
/// Returns the set of AIR legalization features requested by the backend that
/// will compile the function owned by `nav_index`, or `null` if the backend
/// requests none. The backend is chosen from the nav's module target together
/// with the compilation's `use_llvm` setting.
pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*const Air.Legalize.Features {
    const zcu = pt.zcu;
    const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
    switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
        else => unreachable,
        // `inline` makes `backend` comptime-known in each arm, which is required
        // both by `importBackend` and to select the backend's `legalizeFeatures`.
        inline .stage2_llvm,
        .stage2_c,
        .stage2_wasm,
        .stage2_x86_64,
        .stage2_aarch64,
        .stage2_x86,
        .stage2_riscv64,
        .stage2_sparc64,
        .stage2_spirv,
        => |backend| {
            // Assert this compiler build actually includes the selected backend.
            dev.check(devFeatureForBackend(backend));
            return importBackend(backend).legalizeFeatures(target);
        },
    }
}
|
|
|
|
/// Reports whether the backend selected for the function owned by `nav_index`
/// consumes `Air.Liveness` data. Currently only the self-hosted aarch64
/// backend opts out.
pub fn wantsLiveness(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) bool {
    const zcu = pt.zcu;
    const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
    const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
    if (backend == .stage2_aarch64) return false;
    return true;
}
|
|
|
|
/// Every code generation backend has a different MIR representation. However, we want to pass
/// MIR from codegen to the linker *regardless* of which backend is in use. So, we use this: a
/// union of all MIR types. The active tag is known from the backend in use; see `AnyMir.tag`.
pub const AnyMir = union {
    // Each field collapses to `noreturn` when the backend is compiled out of
    // this build (dev gating), so the union never pays for unused backends.
    aarch64: if (dev.env.supports(.aarch64_backend)) @import("codegen/aarch64/Mir.zig") else noreturn,
    riscv64: if (dev.env.supports(.riscv64_backend)) @import("codegen/riscv64/Mir.zig") else noreturn,
    sparc64: if (dev.env.supports(.sparc64_backend)) @import("codegen/sparc64/Mir.zig") else noreturn,
    x86_64: if (dev.env.supports(.x86_64_backend)) @import("codegen/x86_64/Mir.zig") else noreturn,
    wasm: if (dev.env.supports(.wasm_backend)) @import("codegen/wasm/Mir.zig") else noreturn,
    c: if (dev.env.supports(.c_backend)) @import("codegen/c.zig").Mir else noreturn,

    /// Returns the union field name corresponding to `backend`, for use with
    /// `@field`/`@unionInit`. Only backends with a field above are valid.
    pub inline fn tag(comptime backend: std.builtin.CompilerBackend) []const u8 {
        return switch (backend) {
            .stage2_aarch64 => "aarch64",
            .stage2_riscv64 => "riscv64",
            .stage2_sparc64 => "sparc64",
            .stage2_x86_64 => "x86_64",
            .stage2_wasm => "wasm",
            .stage2_c => "c",
            else => unreachable,
        };
    }

    /// Frees the MIR payload. The active field is not stored in the union, so it
    /// is re-derived from the root module's backend selection.
    pub fn deinit(mir: *AnyMir, zcu: *const Zcu) void {
        const gpa = zcu.gpa;
        const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
        switch (backend) {
            else => unreachable,
            inline .stage2_aarch64,
            .stage2_riscv64,
            .stage2_sparc64,
            .stage2_x86_64,
            .stage2_wasm,
            .stage2_c,
            => |backend_ct| @field(mir, tag(backend_ct)).deinit(gpa),
        }
    }
};
|
|
|
|
/// Runs code generation for a function. This process converts the `Air` emitted by `Sema`,
/// alongside annotated `Liveness` data, to machine code in the form of MIR (see `AnyMir`).
///
/// This is supposed to be a "pure" process, but some backends are currently buggy; see
/// `Zcu.Feature.separate_thread` for details.
///
/// On success the caller owns the returned `AnyMir` and must release it with
/// `AnyMir.deinit`.
pub fn generateFunction(
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    air: *const Air,
    liveness: *const ?Air.Liveness,
) CodeGenError!AnyMir {
    const zcu = pt.zcu;
    const func = zcu.funcInfo(func_index);
    const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
    // NOTE(review): `use_llvm` is hard-coded to `false` here, unlike the sibling
    // functions which pass `zcu.comp.config.use_llvm` — presumably because this
    // path is only reached for self-hosted backends; confirm against callers.
    switch (target_util.zigBackend(target, false)) {
        else => unreachable,
        inline .stage2_aarch64,
        .stage2_riscv64,
        .stage2_sparc64,
        .stage2_x86_64,
        .stage2_wasm,
        .stage2_c,
        => |backend| {
            dev.check(devFeatureForBackend(backend));
            const CodeGen = importBackend(backend);
            const mir = try CodeGen.generate(lf, pt, src_loc, func_index, air, liveness);
            // Wrap the backend-specific MIR in the union field matching `backend`.
            return @unionInit(AnyMir, AnyMir.tag(backend), mir);
        },
    }
}
|
|
|
|
/// Converts the MIR returned by `generateFunction` to finalized machine code to be placed in
/// the output binary. This is called from linker implementations, and may query linker state.
///
/// This function is not called for the C backend, as `link.C` directly understands its MIR.
///
/// The `air` parameter is not supposed to exist, but some backends are currently buggy; see
/// `Zcu.Feature.separate_thread` for details.
pub fn emitFunction(
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    func_index: InternPool.Index,
    atom_index: u32,
    any_mir: *const AnyMir,
    w: *std.Io.Writer,
    debug_output: link.File.DebugInfoOutput,
) (CodeGenError || std.Io.Writer.Error)!void {
    const zcu = pt.zcu;
    const func = zcu.funcInfo(func_index);
    const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
    // Only the machine-code backends appear here; .stage2_c is handled directly
    // by `link.C` (see doc comment) and .stage2_wasm does not take this path.
    switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
        else => unreachable,
        inline .stage2_aarch64,
        .stage2_riscv64,
        .stage2_sparc64,
        .stage2_x86_64,
        => |backend| {
            dev.check(devFeatureForBackend(backend));
            // Select the union field matching the comptime-known backend.
            const mir = &@field(any_mir, AnyMir.tag(backend));
            return mir.emit(lf, pt, src_loc, func_index, atom_index, w, debug_output);
        },
    }
}
|
|
|
|
/// Generates and emits machine code for a compiler-synthesized ("lazy") function,
/// writing the bytes to `w`. Only backends that implement `generateLazy` appear
/// in the switch; reaching any other backend here is a compiler bug.
pub fn generateLazyFunction(
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    lazy_sym: link.File.LazySymbol,
    atom_index: u32,
    w: *std.Io.Writer,
    debug_output: link.File.DebugInfoOutput,
) (CodeGenError || std.Io.Writer.Error)!void {
    const zcu = pt.zcu;
    // Prefer the target of the module that declared the symbol's type, falling
    // back to the compilation-wide target when the type has no declaration site.
    const target = if (Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu)) |inst_index|
        &zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
    else
        zcu.getTarget();
    switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
        else => unreachable,
        inline .stage2_riscv64, .stage2_x86_64 => |backend| {
            dev.check(devFeatureForBackend(backend));
            return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, atom_index, w, debug_output);
        },
    }
}
|
|
|
|
/// Emits the bytes for a compiler-synthesized symbol into `w` and reports its
/// required alignment through `alignment`. Three kinds are handled:
/// * `.code` — delegates to `generateLazyFunction`;
/// * the global error name table (`anyerror_type`);
/// * an enum's tag-name string table.
/// Anything else is a TODO and fails codegen with a user-visible error.
pub fn generateLazySymbol(
    bin_file: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    lazy_sym: link.File.LazySymbol,
    // TODO don't use an "out" parameter like this; put it in the result instead
    alignment: *Alignment,
    w: *std.Io.Writer,
    debug_output: link.File.DebugInfoOutput,
    reloc_parent: link.File.RelocInfo.Parent,
) (CodeGenError || std.Io.Writer.Error)!void {
    const tracy = trace(@src());
    defer tracy.end();

    const comp = bin_file.comp;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const target = &comp.root_mod.resolved_target.result;
    const endian = target.cpu.arch.endian();

    log.debug("generateLazySymbol: kind = {s}, ty = {f}", .{
        @tagName(lazy_sym.kind),
        Type.fromInterned(lazy_sym.ty).fmt(pt),
    });

    if (lazy_sym.kind == .code) {
        alignment.* = target_util.defaultFunctionAlignment(target);
        return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, reloc_parent.atom_index, w, debug_output);
    }

    if (lazy_sym.ty == .anyerror_type) {
        // Layout: u32 count, one u32 string offset per error name, a trailing
        // u32 end offset (only when count > 0), then the NUL-terminated names.
        alignment.* = .@"4";
        const err_names = ip.global_error_set.getNamesFromMainThread();
        // Byte offset where the string data begins: count word + one offset per
        // name + the trailing end-offset word (absent when there are no names).
        const strings_start: u32 = @intCast(4 * (1 + err_names.len + @intFromBool(err_names.len > 0)));
        var string_index = strings_start;
        // Reserve space for the header up front so the writeInt calls below
        // cannot fail — hence `catch unreachable`.
        try w.rebase(w.end, string_index);
        w.writeInt(u32, @intCast(err_names.len), endian) catch unreachable;
        if (err_names.len == 0) return;
        for (err_names) |err_name_nts| {
            w.writeInt(u32, string_index, endian) catch unreachable;
            // +1 for the NUL terminator.
            string_index += @intCast(err_name_nts.toSlice(ip).len + 1);
        }
        // Trailing end offset lets readers compute the last name's length.
        w.writeInt(u32, string_index, endian) catch unreachable;
        try w.rebase(w.end, string_index - strings_start);
        for (err_names) |err_name_nts| {
            w.writeAll(err_name_nts.toSlice(ip)) catch unreachable;
            w.writeByte(0) catch unreachable;
        }
    } else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(zcu) == .@"enum") {
        // Tag-name table: the enum's field names concatenated, NUL-terminated.
        alignment.* = .@"1";
        const enum_ty = Type.fromInterned(lazy_sym.ty);
        const tag_names = enum_ty.enumFields(zcu);
        for (0..tag_names.len) |tag_index| {
            const tag_name = tag_names.get(ip)[tag_index].toSlice(ip);
            try w.rebase(w.end, tag_name.len + 1);
            w.writeAll(tag_name) catch unreachable;
            w.writeByte(0) catch unreachable;
        }
    } else {
        return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {f}", .{
            @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt),
        });
    }
}
|
|
|
|
/// Errors that can occur while lowering a comptime-known value to bytes
/// (`generateSymbol` and its helpers).
pub const GenerateSymbolError = error{
    OutOfMemory,
    /// Compiler was asked to operate on a number larger than supported.
    Overflow,
    /// Compiler was asked to produce a non-byte-aligned relocation.
    RelocationNotByteAligned,
};
|
|
|
|
/// Lowers the comptime-known value `val` to its in-memory byte representation
/// for the compilation target, writing the bytes to `w`. Pointer values are
/// lowered via `lowerPtr`, which may register linker relocations attached to
/// `reloc_parent`. Undefined bytes are written as 0xaa. Recurses for composite
/// values (slices, optionals, aggregates, unions, error unions).
pub fn generateSymbol(
    bin_file: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    val: Value,
    w: *std.Io.Writer,
    reloc_parent: link.File.RelocInfo.Parent,
) (GenerateSymbolError || std.Io.Writer.Error)!void {
    const tracy = trace(@src());
    defer tracy.end();

    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const ty = val.typeOf(zcu);

    const target = zcu.getTarget();
    const endian = target.cpu.arch.endian();

    log.debug("generateSymbol: val = {f}", .{val.fmtValue(pt)});

    // A fully-undefined value is emitted as 0xaa filler for its whole ABI size.
    if (val.isUndef(zcu)) {
        const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
        try w.splatByteAll(0xaa, abi_size);
        return;
    }

    switch (ip.indexToKey(val.toIntern())) {
        .int_type,
        .ptr_type,
        .array_type,
        .vector_type,
        .opt_type,
        .anyframe_type,
        .error_union_type,
        .simple_type,
        .struct_type,
        .tuple_type,
        .union_type,
        .opaque_type,
        .enum_type,
        .func_type,
        .error_set_type,
        .inferred_error_set_type,
        => unreachable, // types, not values

        .undef => unreachable, // handled above
        .simple_value => |simple_value| switch (simple_value) {
            .void => unreachable, // non-runtime value
            .null => unreachable, // non-runtime value
            .@"unreachable" => unreachable, // non-runtime value
            // bool lowers to a single 0/1 byte.
            .false, .true => try w.writeByte(switch (simple_value) {
                .false => 0,
                .true => 1,
                else => unreachable,
            }),
        },
        .variable,
        .@"extern",
        .func,
        .enum_literal,
        => unreachable, // non-runtime values
        .int => {
            // Write the integer as two's complement, padded to the ABI size.
            const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
            var space: Value.BigIntSpace = undefined;
            const int_val = val.toBigInt(&space, zcu);
            int_val.writeTwosComplement(try w.writableSlice(abi_size), endian);
        },
        .err => |err| {
            // An error value is its u16 index in the global error set.
            const int = try pt.getErrorValue(err.name);
            try w.writeInt(u16, @intCast(int), endian);
        },
        .error_union => |error_union| {
            const payload_ty = ty.errorUnionPayload(zcu);
            // 0 means "no error" (the payload is active).
            const err_val: u16 = switch (error_union.val) {
                .err_name => |err_name| @intCast(try pt.getErrorValue(err_name)),
                .payload => 0,
            };

            // Zero-bit payload: the error union is just the error code.
            if (!payload_ty.hasRuntimeBits(zcu)) {
                try w.writeInt(u16, err_val, endian);
                return;
            }

            const payload_align = payload_ty.abiAlignment(zcu);
            const error_align = Type.anyerror.abiAlignment(zcu);
            const abi_align = ty.abiAlignment(zcu);

            // error value first when its type is larger than the error union's payload
            if (error_align.order(payload_align) == .gt) {
                try w.writeInt(u16, err_val, endian);
            }

            // emit payload part of the error union
            {
                const begin = w.end;
                // When the error is active, the payload bytes are undef filler.
                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) {
                    .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }),
                    .payload => |payload| payload,
                }), w, reloc_parent);
                const unpadded_end = w.end - begin;
                const padded_end = abi_align.forward(unpadded_end);
                const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;

                if (padding > 0) {
                    try w.splatByteAll(0, padding);
                }
            }

            // Payload size is larger than error set, so emit our error set last
            if (error_align.compare(.lte, payload_align)) {
                const begin = w.end;
                try w.writeInt(u16, err_val, endian);
                const unpadded_end = w.end - begin;
                const padded_end = abi_align.forward(unpadded_end);
                const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;

                if (padding > 0) {
                    try w.splatByteAll(0, padding);
                }
            }
        },
        .enum_tag => |enum_tag| {
            // An enum value lowers as its integer tag, coerced to the tag type.
            const int_tag_ty = ty.intTagType(zcu);
            try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), w, reloc_parent);
        },
        .float => |float| storage: switch (float.storage) {
            .f16 => |f16_val| try w.writeInt(u16, @bitCast(f16_val), endian),
            .f32 => |f32_val| try w.writeInt(u32, @bitCast(f32_val), endian),
            .f64 => |f64_val| try w.writeInt(u64, @bitCast(f64_val), endian),
            .f80 => |f80_val| {
                // 10 significant bytes, then zero padding up to the ABI size.
                try w.writeInt(u80, @bitCast(f80_val), endian);
                const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
                try w.splatByteAll(0, abi_size - 10);
            },
            // f128 storage may back a smaller float type; narrow and re-dispatch
            // via the labeled-switch `continue`.
            .f128 => |f128_val| switch (Type.fromInterned(float.ty).floatBits(target)) {
                else => unreachable,
                16 => continue :storage .{ .f16 = @floatCast(f128_val) },
                32 => continue :storage .{ .f32 = @floatCast(f128_val) },
                64 => continue :storage .{ .f64 = @floatCast(f128_val) },
                128 => try w.writeInt(u128, @bitCast(f128_val), endian),
            },
        },
        .ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), w, reloc_parent, 0),
        .slice => |slice| {
            // A slice is a (ptr, len) pair, emitted in that order.
            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), w, reloc_parent);
            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), w, reloc_parent);
        },
        .opt => {
            const payload_type = ty.optionalChild(zcu);
            const payload_val = val.optionalValue(zcu);
            const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;

            if (ty.optionalReprIsPayload(zcu)) {
                // Pointer-like optionals: null is all-zero, otherwise the payload itself.
                if (payload_val) |value| {
                    try generateSymbol(bin_file, pt, src_loc, value, w, reloc_parent);
                } else {
                    try w.splatByteAll(0, abi_size);
                }
            } else {
                // Payload bytes, then a 0/1 "has value" byte, then tail padding.
                const padding = abi_size - (math.cast(usize, payload_type.abiSize(zcu)) orelse return error.Overflow) - 1;
                if (payload_type.hasRuntimeBits(zcu)) {
                    const value = payload_val orelse Value.fromInterned(try pt.intern(.{
                        .undef = payload_type.toIntern(),
                    }));
                    try generateSymbol(bin_file, pt, src_loc, value, w, reloc_parent);
                }
                try w.writeByte(@intFromBool(payload_val != null));
                try w.splatByteAll(0, padding);
            }
        },
        .aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
            .array_type => |array_type| switch (aggregate.storage) {
                .bytes => |bytes| try w.writeAll(bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
                .elems, .repeated_elem => {
                    var index: u64 = 0;
                    while (index < array_type.lenIncludingSentinel()) : (index += 1) {
                        try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
                            .bytes => unreachable,
                            .elems => |elems| elems[@intCast(index)],
                            // For repeated storage the final slot is the sentinel.
                            .repeated_elem => |elem| if (index < array_type.len)
                                elem
                            else
                                array_type.sentinel,
                        }), w, reloc_parent);
                    }
                },
            },
            .vector_type => |vector_type| {
                const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
                if (vector_type.child == .bool_type) {
                    // Bool vectors pack one element per bit. Start from 0xaa so
                    // untouched (undef) bits keep the undef filler pattern.
                    const bytes = try w.writableSlice(abi_size);
                    @memset(bytes, 0xaa);
                    var index: usize = 0;
                    const len = math.cast(usize, vector_type.len) orelse return error.Overflow;
                    while (index < len) : (index += 1) {
                        // Big-endian targets number bits from the other end.
                        const bit_index = switch (endian) {
                            .big => len - 1 - index,
                            .little => index,
                        };
                        const byte = &bytes[bit_index / 8];
                        const mask = @as(u8, 1) << @truncate(bit_index);
                        if (switch (switch (aggregate.storage) {
                            .bytes => unreachable,
                            .elems => |elems| elems[index],
                            .repeated_elem => |elem| elem,
                        }) {
                            .bool_true => true,
                            .bool_false => false,
                            else => |elem| switch (ip.indexToKey(elem)) {
                                // Undef element: leave the 0xaa filler bit alone.
                                .undef => continue,
                                .int => |int| switch (int.storage) {
                                    .u64 => |x| switch (x) {
                                        0 => false,
                                        1 => true,
                                        else => unreachable,
                                    },
                                    .i64 => |x| switch (x) {
                                        -1 => true,
                                        0 => false,
                                        else => unreachable,
                                    },
                                    else => unreachable,
                                },
                                else => unreachable,
                            },
                        }) byte.* |= mask else byte.* &= ~mask;
                    }
                } else {
                    switch (aggregate.storage) {
                        .bytes => |bytes| try w.writeAll(bytes.toSlice(vector_type.len, ip)),
                        .elems, .repeated_elem => {
                            var index: u64 = 0;
                            while (index < vector_type.len) : (index += 1) {
                                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (aggregate.storage) {
                                    .bytes => unreachable,
                                    .elems => |elems| elems[math.cast(usize, index) orelse return error.Overflow],
                                    .repeated_elem => |elem| elem,
                                }), w, reloc_parent);
                            }
                        },
                    }

                    // Pad out to the vector's ABI size beyond the packed elements.
                    const padding = abi_size -
                        (math.cast(usize, Type.fromInterned(vector_type.child).abiSize(zcu) * vector_type.len) orelse
                            return error.Overflow);
                    if (padding > 0) try w.splatByteAll(0, padding);
                }
            },
            .tuple_type => |tuple| {
                const struct_begin = w.end;
                for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, field_val, field_index| {
                    // Comptime fields (value != .none) occupy no runtime bytes.
                    if (field_val != .none) continue;
                    if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;

                    // Zero-pad up to this field's alignment boundary.
                    try w.splatByteAll(0, math.cast(usize, struct_begin +
                        Type.fromInterned(field_ty).abiAlignment(zcu).forward(w.end - struct_begin) - w.end) orelse
                        return error.Overflow);
                    try generateSymbol(bin_file, pt, src_loc, .fromInterned(switch (aggregate.storage) {
                        .bytes => |bytes| try pt.intern(.{ .int = .{
                            .ty = field_ty,
                            .storage = .{ .u64 = bytes.at(field_index, ip) },
                        } }),
                        .elems => |elems| elems[field_index],
                        .repeated_elem => |elem| elem,
                    }), w, reloc_parent);
                }
                // Trailing padding up to the tuple's full ABI size.
                try w.splatByteAll(0, math.cast(usize, struct_begin + ty.abiSize(zcu) - w.end) orelse
                    return error.Overflow);
            },
            .struct_type => {
                const struct_type = ip.loadStructType(ty.toIntern());
                switch (struct_type.layout) {
                    // Packed struct values are lowered via `.bitpack`, not here.
                    .@"packed" => unreachable,
                    .auto, .@"extern" => {
                        const struct_begin = w.end;
                        const field_types = struct_type.field_types.get(ip);
                        const offsets = struct_type.field_offsets.get(ip);

                        // Walk fields in runtime (memory) order, not declaration order.
                        var it = struct_type.iterateRuntimeOrder(ip);
                        while (it.next()) |field_index| {
                            const field_ty = field_types[field_index];
                            if (!Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;

                            const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
                                .bytes => |bytes| try pt.intern(.{ .int = .{
                                    .ty = field_ty,
                                    .storage = .{ .u64 = bytes.at(field_index, ip) },
                                } }),
                                .elems => |elems| elems[field_index],
                                .repeated_elem => |elem| elem,
                            };

                            // Pad up to the field's precomputed offset.
                            const padding = math.cast(
                                usize,
                                offsets[field_index] - (w.end - struct_begin),
                            ) orelse return error.Overflow;
                            if (padding > 0) try w.splatByteAll(0, padding);

                            try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
                        }

                        assert(struct_type.alignment.check(struct_type.size));

                        // Trailing padding up to the struct's full size.
                        const padding = math.cast(usize, struct_type.size - (w.end - struct_begin)) orelse {
                            return error.Overflow;
                        };
                        if (padding > 0) try w.splatByteAll(0, padding);
                    },
                }
            },
            else => unreachable,
        },
        .un => |un| {
            const layout = ty.unionGetLayout(zcu);

            // Zero-size payload: the union is just its tag.
            if (layout.payload_size == 0) {
                return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);
            }

            // Check if we should store the tag first.
            if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) {
                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);
            }

            const union_obj = zcu.typeToUnion(ty).?;
            if (un.tag != .none) {
                const field_index = ty.unionTagFieldIndex(Value.fromInterned(un.tag), zcu).?;
                const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
                if (!field_ty.hasRuntimeBits(zcu)) {
                    // Zero-bit active field: payload bytes are undef filler.
                    try w.splatByteAll(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
                } else {
                    try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), w, reloc_parent);

                    // Pad the payload area beyond the active field's size.
                    const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(zcu)) orelse return error.Overflow;
                    if (padding > 0) {
                        try w.splatByteAll(0, padding);
                    }
                }
            } else {
                // Untagged representation: emit the stored payload value directly.
                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), w, reloc_parent);
            }

            // Tag after payload when the payload has the stricter alignment.
            if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) {
                try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), w, reloc_parent);

                if (layout.padding > 0) {
                    try w.splatByteAll(0, layout.padding);
                }
            }
        },
        // Packed values lower as their backing integer.
        .bitpack => |bitpack| try generateSymbol(bin_file, pt, src_loc, .fromInterned(bitpack.backing_int_val), w, reloc_parent),
        .memoized_call => unreachable,
    }
}
|
|
|
|
/// Lowers a comptime-known pointer value to bytes, recursing through derived
/// pointers (error-union payload, optional payload, field, array element)
/// while accumulating the total byte offset in `prev_offset`. The recursion
/// bottoms out at a nav ref, a uav ref, or an integer address.
fn lowerPtr(
    bin_file: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    ptr_val: InternPool.Index,
    w: *std.Io.Writer,
    reloc_parent: link.File.RelocInfo.Parent,
    prev_offset: u64,
) (GenerateSymbolError || std.Io.Writer.Error)!void {
    const zcu = pt.zcu;
    const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
    // Fold this pointer's own byte offset into the running total.
    const offset: u64 = prev_offset + ptr.byte_offset;
    return switch (ptr.base_addr) {
        .nav => |nav| try lowerNavRef(bin_file, pt, nav, w, reloc_parent, offset),
        .uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, w, reloc_parent, offset),
        // Integer-backed pointer: emit the address as a usize value.
        .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), w, reloc_parent),
        // Pointer to an error union's payload: recurse into the base pointer,
        // adding the payload's offset within the error union.
        .eu_payload => |eu_ptr| try lowerPtr(
            bin_file,
            pt,
            src_loc,
            eu_ptr,
            w,
            reloc_parent,
            offset + errUnionPayloadOffset(
                Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu),
                zcu,
            ),
        ),
        // Optional payload shares the optional's address (offset unchanged).
        .opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, w, reloc_parent, offset),
        .field => |field| {
            const base_ptr = Value.fromInterned(field.base);
            const base_ty = base_ptr.typeOf(zcu).childType(zcu);
            const field_off: u64 = switch (base_ty.zigTypeTag(zcu)) {
                .pointer => off: {
                    // The only pointer type with fields is a slice: ptr then len.
                    assert(base_ty.isSlice(zcu));
                    break :off switch (field.index) {
                        Value.slice_ptr_index => 0,
                        Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8),
                        else => unreachable,
                    };
                },
                .@"struct", .@"union" => switch (base_ty.containerLayout(zcu)) {
                    .auto => base_ty.structFieldOffset(@intCast(field.index), zcu),
                    .@"extern", .@"packed" => unreachable,
                },
                else => unreachable,
            };
            return lowerPtr(bin_file, pt, src_loc, field.base, w, reloc_parent, offset + field_off);
        },
        .arr_elem => |arr_elem| {
            const base_ptr_ty = Value.fromInterned(arr_elem.base).typeOf(zcu);
            assert(base_ptr_ty.ptrSize(zcu) == .many);
            const elem_size = base_ptr_ty.childType(zcu).abiSize(zcu);
            return lowerPtr(bin_file, pt, src_loc, arr_elem.base, w, reloc_parent, offset + elem_size * arr_elem.index);
        },
        .comptime_alloc => unreachable,
        .comptime_field => unreachable,
    };
}
|
|
|
|
/// Lowers a pointer to an unnamed (anonymous) value: asks the linker to place
/// the value, then writes the resulting virtual address (plus `offset`) as a
/// pointer-sized integer, registering a relocation via `reloc_parent`. The wasm
/// linker is special-cased to emit a fixup instead of a vaddr.
fn lowerUavRef(
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    uav: InternPool.Key.Ptr.BaseAddr.Uav,
    w: *std.Io.Writer,
    reloc_parent: link.File.RelocInfo.Parent,
    offset: u64,
) (GenerateSymbolError || std.Io.Writer.Error)!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const comp = lf.comp;
    const target = &comp.root_mod.resolved_target.result;
    const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
    const uav_val = uav.val;
    const uav_ty = Type.fromInterned(ip.typeOf(uav_val));
    const is_fn_body = uav_ty.zigTypeTag(zcu) == .@"fn";

    log.debug("lowerUavRef: ty = {f}", .{uav_ty.fmt(pt)});

    // Pointers to zero-bit values have no meaningful address; emit undef filler.
    if (!is_fn_body and !uav_ty.hasRuntimeBits(zcu)) {
        try w.splatByteAll(0xaa, ptr_width_bytes);
        return;
    }

    switch (lf.tag) {
        .c => unreachable,
        .spirv => unreachable,
        .wasm => {
            // Wasm records a fixup and writes a zero placeholder; the address is
            // resolved later by the wasm linker.
            dev.check(link.File.Tag.wasm.devFeature());
            const wasm = lf.cast(.wasm).?;
            assert(reloc_parent == .none);
            try wasm.addUavReloc(w.end, uav.val, uav.orig_ty, @intCast(offset));
            try w.splatByteAll(0, ptr_width_bytes);
            return;
        },
        else => {},
    }

    // Alignment comes from the original pointer type, not the value's natural one.
    const uav_align = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu);
    switch (try lf.lowerUav(pt, uav_val, uav_align, src_loc)) {
        .sym_index => {},
        .fail => |em| std.debug.panic("TODO rework lowerUav. internal error: {s}", .{em.msg}),
    }

    const vaddr = lf.getUavVAddr(uav_val, .{
        .parent = reloc_parent,
        .offset = w.end,
        .addend = @intCast(offset),
    }) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |e| std.debug.panic("TODO rework lowerUav. internal error: {t}", .{e}),
    };
    const endian = target.cpu.arch.endian();
    // Write the address at the target's pointer width.
    switch (ptr_width_bytes) {
        2 => try w.writeInt(u16, @intCast(vaddr), endian),
        4 => try w.writeInt(u32, @intCast(vaddr), endian),
        8 => try w.writeInt(u64, vaddr, endian),
        else => unreachable,
    }
}
|
|
|
|
/// Lowers a pointer to a named declaration (nav): obtains its virtual address
/// from the linker and writes it (plus `offset`) as a pointer-sized integer,
/// registering a relocation via `reloc_parent`. The wasm linker is
/// special-cased: function pointers become indirect-function-table entries and
/// data pointers become relocs/fixups, with a zero placeholder written here.
fn lowerNavRef(
    lf: *link.File,
    pt: Zcu.PerThread,
    nav_index: InternPool.Nav.Index,
    w: *std.Io.Writer,
    reloc_parent: link.File.RelocInfo.Parent,
    offset: u64,
) (GenerateSymbolError || std.Io.Writer.Error)!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
    const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
    const is_obj = lf.comp.config.output_mode == .Obj;
    const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));

    // Zero-bit non-extern navs have no address; emit undef filler bytes.
    if (!nav_ty.isRuntimeFnOrHasRuntimeBits(zcu) and ip.getNav(nav_index).getExtern(ip) == null) {
        try w.splatByteAll(0xaa, ptr_width_bytes);
        return;
    }

    switch (lf.tag) {
        .c => unreachable,
        .spirv => unreachable,
        .wasm => {
            dev.check(link.File.Tag.wasm.devFeature());
            const wasm = lf.cast(.wasm).?;
            assert(reloc_parent == .none);
            if (nav_ty.zigTypeTag(zcu) == .@"fn") {
                // Function pointers on wasm are indices into the indirect
                // function table; ensure this nav has a table slot.
                const gop = try wasm.zcu_indirect_function_set.getOrPut(gpa, nav_index);
                if (!gop.found_existing) gop.value_ptr.* = {};
                if (is_obj) {
                    @panic("TODO add out_reloc for this");
                } else {
                    try wasm.func_table_fixups.append(gpa, .{
                        .table_index = @enumFromInt(gop.index),
                        .offset = @intCast(w.end),
                    });
                }
            } else {
                if (is_obj) {
                    // Relocatable object: emit a proper wasm relocation record.
                    try wasm.out_relocs.append(gpa, .{
                        .offset = @intCast(w.end),
                        .pointee = .{ .symbol_index = try wasm.navSymbolIndex(nav_index) },
                        .tag = if (ptr_width_bytes == 4) .memory_addr_i32 else .memory_addr_i64,
                        .addend = @intCast(offset),
                    });
                } else {
                    // Executable: record an internal fixup to patch later.
                    try wasm.nav_fixups.ensureUnusedCapacity(gpa, 1);
                    wasm.nav_fixups.appendAssumeCapacity(.{
                        .navs_exe_index = try wasm.refNavExe(nav_index),
                        .offset = @intCast(w.end),
                        .addend = @intCast(offset),
                    });
                }
            }
            // Placeholder bytes; the actual value is patched by the fixup/reloc.
            try w.splatByteAll(0, ptr_width_bytes);
            return;
        },
        else => {},
    }

    const vaddr = lf.getNavVAddr(pt, nav_index, .{
        .parent = reloc_parent,
        .offset = w.end,
        .addend = @intCast(offset),
    }) catch @panic("TODO rework getNavVAddr");
    const endian = target.cpu.arch.endian();
    // Write the address at the target's pointer width.
    switch (ptr_width_bytes) {
        2 => try w.writeInt(u16, @intCast(vaddr), endian),
        4 => try w.writeInt(u32, @intCast(vaddr), endian),
        8 => try w.writeInt(u64, vaddr, endian),
        else => unreachable,
    }
}
|
|
|
|
/// Helper struct to denote that the value is in memory but requires a linker relocation fixup:
/// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc)
/// * direct - the value is referenced directly via symbol index (the linker emits a displacement reloc)
/// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc)
pub const LinkerLoad = struct {
    /// How the symbol is referenced; determines the relocation kind the linker emits.
    type: enum {
        got,
        direct,
        import,
    },
    /// Index of the symbol (or GOT/import entry) in the linker's tables.
    sym_index: u32,
};
|
|
|
|
/// Result of resolving a value/nav to a linker symbol: the symbol index on
/// success, or an error message describing why resolution failed.
pub const SymbolResult = union(enum) { sym_index: u32, fail: *ErrorMsg };
|
|
|
|
/// Resolves a linker symbol for `nav_index` in the output file `lf`, creating
/// per-nav linker metadata (and, for externs, a global symbol) as needed.
/// Returns the symbol index on success, or an allocated `ErrorMsg` when the
/// active linker backend cannot represent the reference.
pub fn genNavRef(
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    nav_index: InternPool.Nav.Index,
    target: *const std.Target,
) CodeGenError!SymbolResult {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(nav_index);
    log.debug("genNavRef({f})", .{nav.fqn.fmt(ip)});

    // Externs carry explicit linkage and threadlocal information; everything
    // else is internal and never TLS. TLS is only honored when the compilation
    // is not single-threaded.
    const lib_name, const linkage, const is_threadlocal = if (nav.getExtern(ip)) |e|
        .{ e.lib_name, e.linkage, e.is_threadlocal and zcu.comp.config.any_non_single_threaded }
    else
        .{ .none, .internal, false };

    if (lf.cast(.elf)) |elf_file| {
        const zig_object = elf_file.zigObjectPtr().?;
        switch (linkage) {
            .internal => {
                const sym_index = try zig_object.getOrCreateMetadataForNav(zcu, nav_index);
                if (is_threadlocal) zig_object.symbol(sym_index).flags.is_tls = true;
                return .{ .sym_index = sym_index };
            },
            .strong, .weak => {
                const sym_index = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
                if (linkage == .weak) zig_object.symbol(sym_index).flags.weak = true;
                if (is_threadlocal) zig_object.symbol(sym_index).flags.is_tls = true;
                return .{ .sym_index = sym_index };
            },
            .link_once => unreachable,
        }
    }

    if (lf.cast(.elf2)) |elf| {
        const sym = elf.navSymbol(zcu, nav_index) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            else => |e| return .{ .fail = try ErrorMsg.create(
                zcu.gpa,
                src_loc,
                "linker failed to create a nav: {t}",
                .{e},
            ) },
        };
        return .{ .sym_index = @intFromEnum(sym) };
    }

    if (lf.cast(.macho)) |macho_file| {
        const zig_object = macho_file.getZigObject().?;
        switch (linkage) {
            .internal => {
                const sym_index = try zig_object.getOrCreateMetadataForNav(macho_file, nav_index);
                if (is_threadlocal) zig_object.symbols.items[sym_index].flags.tlv = true;
                return .{ .sym_index = sym_index };
            },
            .strong, .weak => {
                const sym_index = try macho_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
                if (linkage == .weak) zig_object.symbols.items[sym_index].flags.weak = true;
                if (is_threadlocal) zig_object.symbols.items[sym_index].flags.tlv = true;
                return .{ .sym_index = sym_index };
            },
            .link_once => unreachable,
        }
    }

    if (lf.cast(.coff2)) |coff| {
        return .{ .sym_index = @intFromEnum(try coff.navSymbol(zcu, nav_index)) };
    }

    return .{ .fail = try ErrorMsg.create(zcu.gpa, src_loc, "TODO genNavRef for target {}", .{target}) };
}
|
|
|
|
/// deprecated legacy type
pub const GenResult = union(enum) {
    /// The value lowered successfully to a machine code value.
    mcv: MCValue,
    /// Lowering failed; payload is the allocated error message.
    fail: *ErrorMsg,

    const MCValue = union(enum) {
        /// No runtime representation (zero-bit values such as `void`).
        none,
        /// The value is `undefined`; backends may emit any bit pattern.
        undef,
        /// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets
        /// such as ARM, the immediate will never exceed 32-bits.
        immediate: u64,
        /// Decl with address deferred until the linker allocates everything in virtual memory.
        /// Payload is a symbol index.
        load_direct: u32,
        /// Decl with address deferred until the linker allocates everything in virtual memory.
        /// Payload is a symbol index.
        lea_direct: u32,
        /// Decl referenced via GOT with address deferred until the linker allocates
        /// everything in virtual memory.
        /// Payload is a symbol index.
        load_got: u32,
        /// Direct by-address reference to memory location.
        memory: u64,
        /// Reference to memory location but deferred until linker allocated the Decl in memory.
        /// Traditionally, this corresponds to emitting a relocation in a relocatable object file.
        load_symbol: u32,
        /// Reference to memory location but deferred until linker allocated the Decl in memory.
        /// Traditionally, this corresponds to emitting a relocation in a relocatable object file.
        lea_symbol: u32,
    };
};
|
|
|
|
/// deprecated legacy code path
///
/// Lowers `val` via `lowerValue` and converts the result into a legacy
/// `GenResult`, resolving nav/uav references through the linker. Returns
/// `.fail` with an allocated message when symbol resolution fails.
pub fn genTypedValue(
    lf: *link.File,
    pt: Zcu.PerThread,
    src_loc: Zcu.LazySrcLoc,
    val: Value,
    target: *const std.Target,
) CodeGenError!GenResult {
    switch (try lowerValue(pt, val, target)) {
        .none => return .{ .mcv = .none },
        .undef => return .{ .mcv = .undef },
        .immediate => |imm| return .{ .mcv = .{ .immediate = imm } },
        // A nav address becomes a symbol reference via the linker.
        .lea_nav => |nav| switch (try genNavRef(lf, pt, src_loc, nav, target)) {
            .sym_index => |sym_index| return .{ .mcv = .{ .lea_symbol = sym_index } },
            .fail => |em| return .{ .fail = em },
        },
        // Unnamed values are first lowered into the output file, then
        // referenced either as a load from, or the address of, their symbol.
        .load_uav => |uav| switch (try lf.lowerUav(
            pt,
            uav.val,
            Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
            src_loc,
        )) {
            .sym_index => |sym_index| return .{ .mcv = .{ .load_symbol = sym_index } },
            .fail => |em| return .{ .fail = em },
        },
        .lea_uav => |uav| switch (try lf.lowerUav(
            pt,
            uav.val,
            Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
            src_loc,
        )) {
            .sym_index => |sym_index| return .{ .mcv = .{ .lea_symbol = sym_index } },
            .fail => |em| return .{ .fail = em },
        },
    }
}
|
|
|
|
/// Outcome of `lowerValue`: either a directly representable value, or a
/// reference that still needs linker resolution (nav/uav).
const LowerResult = union(enum) {
    /// No runtime representation (zero-bit values such as `void`).
    none,
    /// The value is `undefined`.
    undef,
    /// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets
    /// such as ARM, the immediate will never exceed 32-bits.
    immediate: u64,
    /// Address of a named nav; the caller must resolve it to a symbol.
    lea_nav: InternPool.Nav.Index,
    /// Load from the memory of an unnamed (anonymous) value.
    load_uav: InternPool.Key.Ptr.BaseAddr.Uav,
    /// Address of an unnamed (anonymous) value.
    lea_uav: InternPool.Key.Ptr.BaseAddr.Uav,
};
|
|
|
|
/// Lowers a comptime-known `val` into a `LowerResult`: an immediate when the
/// value fits in a register-sized integer, a nav/uav reference when it must
/// live in memory, `.none` for `void`, and `.undef` for undefined values.
/// Falls through to `.load_uav` (anonymous constant in memory) for anything
/// not handled by a specific case.
pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allocator.Error!LowerResult {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const ty = val.typeOf(zcu);

    log.debug("lowerValue(@as({f}, {f}))", .{ ty.fmt(pt), val.fmtValue(pt) });

    if (val.isUndef(zcu)) return .undef;

    switch (ty.zigTypeTag(zcu)) {
        .void => return .none,
        .bool => return .{ .immediate = @intFromBool(val.toBool()) },
        .pointer => switch (ty.ptrSize(zcu)) {
            // Slices are ptr+len aggregates; fall through to memory lowering.
            .slice => {},
            .one, .many, .c => {
                const ptr = ip.indexToKey(val.toIntern()).ptr;
                // A pointer from an integer is just that integer.
                if (ptr.base_addr == .int) return .{ .immediate = ptr.byte_offset };
                // Only offset-0 pointers can be lowered as a plain symbol
                // reference; offset pointers fall through to memory lowering.
                if (ptr.byte_offset == 0) switch (ptr.base_addr) {
                    .int => unreachable, // handled above

                    .nav => |nav_index| {
                        const nav = ip.getNav(nav_index);
                        const nav_ty: Type = .fromInterned(nav.typeOf(ip));
                        if (nav_ty.isRuntimeFnOrHasRuntimeBits(zcu) or nav.getExtern(ip) != null) {
                            return .{ .lea_nav = nav_index };
                        } else {
                            // Zero-bit pointee: no symbol exists, so fabricate
                            // a well-aligned dummy address.
                            // Create the 0xaa bit pattern...
                            const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
                            // ...but align the pointer
                            const alignment = zcu.navAlignment(nav_index);
                            return .{ .immediate = alignment.forward(undef_ptr_bits) };
                        }
                    },

                    .uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).isRuntimeFnOrHasRuntimeBits(zcu)) {
                        return .{ .lea_uav = uav };
                    } else {
                        // Zero-bit pointee: fabricate a well-aligned dummy
                        // address, as in the nav case above.
                        // Create the 0xaa bit pattern...
                        const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
                        // ...but align the pointer
                        const alignment = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu);
                        return .{ .immediate = alignment.forward(undef_ptr_bits) };
                    },

                    else => {},
                };
            },
        },
        .int => {
            // Integers no wider than a pointer become immediates; wider ones
            // fall through to memory lowering.
            const info = ty.intInfo(zcu);
            if (info.bits <= target.ptrBitWidth()) {
                const unsigned: u64 = switch (info.signedness) {
                    .signed => @bitCast(val.toSignedInt(zcu)),
                    .unsigned => val.toUnsignedInt(zcu),
                };
                return .{ .immediate = unsigned };
            }
        },
        .optional => {
            if (ty.isPtrLikeOptional(zcu)) {
                // Pointer-like optionals use 0 as the null representation.
                return lowerValue(
                    pt,
                    val.optionalValue(zcu) orelse return .{ .immediate = 0 },
                    target,
                );
            } else if (ty.abiSize(zcu) == 1) {
                // One-byte optionals reduce to the non-null flag.
                return .{ .immediate = @intFromBool(!val.isNull(zcu)) };
            }
        },
        .@"enum" => {
            // Lower the enum as its integer tag value.
            const enum_tag = ip.indexToKey(val.toIntern()).enum_tag;
            return lowerValue(
                pt,
                Value.fromInterned(enum_tag.int),
                target,
            );
        },
        .@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed") {
            // Packed containers lower as their backing integer.
            const bitpack = ip.indexToKey(val.toIntern()).bitpack;
            return lowerValue(pt, .fromInterned(bitpack.backing_int_val), target);
        },
        .error_set => {
            const err_name = ip.indexToKey(val.toIntern()).err.name;
            const error_index = ip.getErrorValueIfExists(err_name).?;
            return .{ .immediate = error_index };
        },
        .error_union => {
            const err_type = ty.errorUnionSet(zcu);
            const payload_type = ty.errorUnionPayload(zcu);
            if (!payload_type.hasRuntimeBits(zcu)) {
                // We use the error type directly as the type.
                const err_int_ty = try pt.errorIntType();
                switch (ip.indexToKey(val.toIntern()).error_union.val) {
                    .err_name => |err_name| return lowerValue(
                        pt,
                        Value.fromInterned(try pt.intern(.{ .err = .{
                            .ty = err_type.toIntern(),
                            .name = err_name,
                        } })),
                        target,
                    ),
                    // A payload-carrying value with a zero-bit payload lowers
                    // as error code 0 ("no error").
                    .payload => return lowerValue(
                        pt,
                        try pt.intValue(err_int_ty, 0),
                        target,
                    ),
                }
            }
        },

        // These types are comptime-only and never reach codegen.
        .comptime_int => unreachable,
        .comptime_float => unreachable,
        .type => unreachable,
        .enum_literal => unreachable,
        .noreturn => unreachable,
        .undefined => unreachable,
        .null => unreachable,
        .@"opaque" => unreachable,

        else => {},
    }

    // Default: place the value in memory as an anonymous constant and load it.
    return .{ .load_uav = .{
        .val = val.toIntern(),
        .orig_ty = (try pt.singleConstPtrType(ty)).toIntern(),
    } };
}
|
|
|
|
/// Byte offset of the payload within an error union of payload type
/// `payload_ty`. The more-aligned field is placed first: when the payload is
/// at least as aligned as the error code, the payload sits at offset 0;
/// otherwise it follows the error code, aligned up.
/// A zero-bit payload has offset 0.
pub fn errUnionPayloadOffset(payload_ty: Type, zcu: *Zcu) u64 {
    if (!payload_ty.hasRuntimeBits(zcu)) return 0;
    const payload_align = payload_ty.abiAlignment(zcu);
    const error_align = Type.anyerror.abiAlignment(zcu);
    // Note: the early return above already guarantees the payload has runtime
    // bits, so no second hasRuntimeBits check is needed here.
    if (payload_align.compare(.gte, error_align)) {
        return 0;
    } else {
        return payload_align.forward(Type.anyerror.abiSize(zcu));
    }
}
|
|
|
|
/// Byte offset of the error code within an error union of payload type
/// `payload_ty`. Mirrors `errUnionPayloadOffset`: when the payload is at least
/// as aligned as the error code, the payload comes first and the error code
/// follows it, aligned up; otherwise the error code sits at offset 0.
/// A zero-bit payload puts the error code at offset 0.
pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
    if (!payload_ty.hasRuntimeBits(zcu)) return 0;
    const payload_align = payload_ty.abiAlignment(zcu);
    const error_align = Type.anyerror.abiAlignment(zcu);
    // Note: the early return above already guarantees the payload has runtime
    // bits, so the original `and payload_ty.hasRuntimeBits(zcu)` was redundant.
    if (payload_align.compare(.gte, error_align)) {
        return error_align.forward(payload_ty.abiSize(zcu));
    } else {
        return 0;
    }
}
|
|
|
|
/// Byte offset applied when converting a pointer-to-aggregate into a pointer
/// to one of its fields. For auto/extern layout this is the field's struct
/// offset; for packed layout it is derived from the bit offsets carried in the
/// two pointer types (and must be byte-divisible, hence `@divExact`).
pub fn fieldOffset(ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32, zcu: *Zcu) u64 {
    const agg_ty = ptr_agg_ty.childType(zcu);
    switch (agg_ty.containerLayout(zcu)) {
        .auto, .@"extern" => return agg_ty.structFieldOffset(field_index, zcu),
        .@"packed" => {
            // Bit offset of the aggregate within its host integer.
            const agg_bit_off: u64 = ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset;
            // Bit offset of the field within the aggregate, when known.
            const field_bit_off: u64 = if (zcu.typeToPackedStruct(agg_ty)) |loaded_struct|
                zcu.structPackedFieldBitOffset(loaded_struct, field_index)
            else
                0;
            // Bit offset already accounted for by the field pointer type.
            const result_bit_off: u64 = ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset;
            return @divExact(agg_bit_off + field_bit_off - result_bit_off, 8);
        },
    }
}
|
|
|
|
test {
    // Reference the backend namespace so its tests are included in this
    // file's test run.
    _ = aarch64;
}
|