(wip) update wasm linker to new Writer API

This commit is contained in:
Andrew Kelley
2025-08-10 21:39:17 -07:00
parent 3280fc98f3
commit 168da23d8f
6 changed files with 873 additions and 1046 deletions
+103 -134
View File
@@ -4,6 +4,7 @@ const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const leb = std.leb;
const Writer = std.Io.Writer;
const Wasm = link.File.Wasm;
const Mir = @import("Mir.zig");
@@ -14,16 +15,16 @@ const codegen = @import("../../codegen.zig");
mir: Mir,
wasm: *Wasm,
/// The binary representation that will be emitted by this module.
code: *std.ArrayListUnmanaged(u8),
/// The binary representation of this module is written here.
writer: *Writer,
pub const Error = error{
OutOfMemory,
WriteFailed,
};
pub fn lowerToCode(emit: *Emit) Error!void {
const mir = &emit.mir;
const code = emit.code;
const writer = emit.writer;
const wasm = emit.wasm;
const comp = wasm.base.comp;
const gpa = comp.gpa;
@@ -41,18 +42,19 @@ pub fn lowerToCode(emit: *Emit) Error!void {
},
.block, .loop => {
const block_type = datas[inst].block_type;
try code.ensureUnusedCapacity(gpa, 2);
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
code.appendAssumeCapacity(@intFromEnum(block_type));
try writer.writeAll(&.{
@intFromEnum(tags[inst]),
@intFromEnum(block_type),
});
inst += 1;
continue :loop tags[inst];
},
.uav_ref => {
if (is_obj) {
try uavRefObj(wasm, code, datas[inst].ip_index, 0, is_wasm32);
try uavRefObj(wasm, writer, datas[inst].ip_index, 0, is_wasm32);
} else {
try uavRefExe(wasm, code, datas[inst].ip_index, 0, is_wasm32);
try uavRefExe(wasm, writer, datas[inst].ip_index, 0, is_wasm32);
}
inst += 1;
continue :loop tags[inst];
@@ -60,20 +62,20 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.uav_ref_off => {
const extra = mir.extraData(Mir.UavRefOff, datas[inst].payload).data;
if (is_obj) {
try uavRefObj(wasm, code, extra.value, extra.offset, is_wasm32);
try uavRefObj(wasm, writer, extra.value, extra.offset, is_wasm32);
} else {
try uavRefExe(wasm, code, extra.value, extra.offset, is_wasm32);
try uavRefExe(wasm, writer, extra.value, extra.offset, is_wasm32);
}
inst += 1;
continue :loop tags[inst];
},
.nav_ref => {
try navRefOff(wasm, code, .{ .nav_index = datas[inst].nav_index, .offset = 0 }, is_wasm32);
try navRefOff(wasm, writer, .{ .nav_index = datas[inst].nav_index, .offset = 0 }, is_wasm32);
inst += 1;
continue :loop tags[inst];
},
.nav_ref_off => {
try navRefOff(wasm, code, mir.extraData(Mir.NavRefOff, datas[inst].payload).data, is_wasm32);
try navRefOff(wasm, writer, mir.extraData(Mir.NavRefOff, datas[inst].payload).data, is_wasm32);
inst += 1;
continue :loop tags[inst];
},
@@ -81,11 +83,11 @@ pub fn lowerToCode(emit: *Emit) Error!void {
const indirect_func_idx: Wasm.ZcuIndirectFunctionSetIndex = @enumFromInt(
wasm.zcu_indirect_function_set.getIndex(datas[inst].nav_index).?,
);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
if (is_obj) {
@panic("TODO");
} else {
leb.writeUleb128(code.fixedWriter(), 1 + @intFromEnum(indirect_func_idx)) catch unreachable;
try writer.writeLeb128(1 + @intFromEnum(indirect_func_idx));
}
inst += 1;
continue :loop tags[inst];
@@ -95,52 +97,48 @@ pub fn lowerToCode(emit: *Emit) Error!void {
continue :loop tags[inst];
},
.errors_len => {
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
// MIR is lowered during flush, so there is indeed only one thread at this time.
const errors_len = 1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len;
leb.writeIleb128(code.fixedWriter(), errors_len) catch unreachable;
const errors_len: u32 = @intCast(1 + comp.zcu.?.intern_pool.global_error_set.getNamesFromMainThread().len);
try writer.writeLeb128(@as(i32, @bitCast(errors_len)));
inst += 1;
continue :loop tags[inst];
},
.error_name_table_ref => {
wasm.error_name_table_ref_count += 1;
try code.ensureUnusedCapacity(gpa, 11);
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
code.appendAssumeCapacity(@intFromEnum(opcode));
try writer.writeByte(@intFromEnum(opcode));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .symbol_index = try wasm.errorNameTableSymbolIndex() },
.tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
.addend = 0,
});
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
try writer.splatByteAll(0, if (is_wasm32) 5 else 10);
inst += 1;
continue :loop tags[inst];
} else {
const addr: u32 = wasm.errorNameTableAddr();
leb.writeIleb128(code.fixedWriter(), addr) catch unreachable;
try writer.writeLeb128(@as(i32, @bitCast(addr)));
inst += 1;
continue :loop tags[inst];
}
},
.br_if, .br, .memory_grow, .memory_size => {
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
leb.writeUleb128(code.fixedWriter(), datas[inst].label) catch unreachable;
try writer.writeByte(@intFromEnum(tags[inst]));
try writer.writeLeb128(datas[inst].label);
inst += 1;
continue :loop tags[inst];
},
.local_get, .local_set, .local_tee => {
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
leb.writeUleb128(code.fixedWriter(), datas[inst].local) catch unreachable;
try writer.writeByte(@intFromEnum(tags[inst]));
try writer.writeLeb128(datas[inst].local);
inst += 1;
continue :loop tags[inst];
@@ -150,29 +148,27 @@ pub fn lowerToCode(emit: *Emit) Error!void {
const extra_index = datas[inst].payload;
const extra = mir.extraData(Mir.JumpTable, extra_index);
const labels = mir.extra[extra.end..][0..extra.data.length];
try code.ensureUnusedCapacity(gpa, 11 + 10 * labels.len);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.br_table));
// -1 because default label is not part of length/depth.
leb.writeUleb128(code.fixedWriter(), extra.data.length - 1) catch unreachable;
for (labels) |label| leb.writeUleb128(code.fixedWriter(), label) catch unreachable;
try writer.writeLeb128(extra.data.length - 1);
for (labels) |label| try writer.writeLeb128(label);
inst += 1;
continue :loop tags[inst];
},
.call_nav => {
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.call));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .symbol_index = try wasm.navSymbolIndex(datas[inst].nav_index) },
.tag = .function_index_leb,
.addend = 0,
});
code.appendNTimesAssumeCapacity(0, 5);
try writer.splatByteAll(0, 5);
} else {
appendOutputFunctionIndex(code, .fromIpNav(wasm, datas[inst].nav_index));
try appendOutputFunctionIndex(writer, .fromIpNav(wasm, datas[inst].nav_index));
}
inst += 1;
@@ -180,7 +176,6 @@ pub fn lowerToCode(emit: *Emit) Error!void {
},
.call_indirect => {
try code.ensureUnusedCapacity(gpa, 11);
const fn_info = comp.zcu.?.typeToFunc(.fromInterned(datas[inst].ip_index)).?;
const func_ty_index = wasm.getExistingFunctionType(
fn_info.cc,
@@ -188,38 +183,37 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.fromInterned(fn_info.return_type),
target,
).?;
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call_indirect));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.call_indirect));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .type_index = func_ty_index },
.tag = .type_index_leb,
.addend = 0,
});
code.appendNTimesAssumeCapacity(0, 5);
try writer.splatByteAll(0, 5);
} else {
const index: Wasm.Flush.FuncTypeIndex = .fromTypeIndex(func_ty_index, &wasm.flush_buffer);
leb.writeUleb128(code.fixedWriter(), @intFromEnum(index)) catch unreachable;
try writer.writeLeb128(@intFromEnum(index));
}
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // table index
try writer.writeUleb128(0); // table index
inst += 1;
continue :loop tags[inst];
},
.call_tag_name => {
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.call));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .symbol_index = try wasm.tagNameSymbolIndex(datas[inst].ip_index) },
.tag = .function_index_leb,
.addend = 0,
});
code.appendNTimesAssumeCapacity(0, 5);
try writer.splatByteAll(0, 5);
} else {
appendOutputFunctionIndex(code, .fromTagNameType(wasm, datas[inst].ip_index));
try appendOutputFunctionIndex(writer, .fromTagNameType(wasm, datas[inst].ip_index));
}
inst += 1;
@@ -232,18 +226,17 @@ pub fn lowerToCode(emit: *Emit) Error!void {
// table initialized based on the `Mir.Intrinsic` enum.
const symbol_name = try wasm.internString(@tagName(datas[inst].intrinsic));
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.call));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .symbol_index = try wasm.symbolNameIndex(symbol_name) },
.tag = .function_index_leb,
.addend = 0,
});
code.appendNTimesAssumeCapacity(0, 5);
try writer.splatByteAll(0, 5);
} else {
appendOutputFunctionIndex(code, .fromSymbolName(wasm, symbol_name));
try appendOutputFunctionIndex(writer, .fromSymbolName(wasm, symbol_name));
}
inst += 1;
@@ -251,19 +244,17 @@ pub fn lowerToCode(emit: *Emit) Error!void {
},
.global_set_sp => {
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.global_set));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .symbol_index = try wasm.stackPointerSymbolIndex() },
.tag = .global_index_leb,
.addend = 0,
});
code.appendNTimesAssumeCapacity(0, 5);
try writer.splatByteAll(0, 5);
} else {
const sp_global: Wasm.GlobalIndex = .stack_pointer;
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
try writer.writeLeb128(@intFromEnum(Wasm.GlobalIndex.stack_pointer));
}
inst += 1;
@@ -271,36 +262,32 @@ pub fn lowerToCode(emit: *Emit) Error!void {
},
.f32_const => {
try code.ensureUnusedCapacity(gpa, 5);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f32_const));
std.mem.writeInt(u32, code.addManyAsArrayAssumeCapacity(4), @bitCast(datas[inst].float32), .little);
try writer.writeByte(@intFromEnum(std.wasm.Opcode.f32_const));
try writer.writeInt(u32, @bitCast(datas[inst].float32), .little);
inst += 1;
continue :loop tags[inst];
},
.f64_const => {
try code.ensureUnusedCapacity(gpa, 9);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.f64_const));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.f64_const));
const float64 = mir.extraData(Mir.Float64, datas[inst].payload).data;
std.mem.writeInt(u64, code.addManyAsArrayAssumeCapacity(8), float64.toInt(), .little);
try writer.writeInt(u64, float64.toInt(), .little);
inst += 1;
continue :loop tags[inst];
},
.i32_const => {
try code.ensureUnusedCapacity(gpa, 6);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.fixedWriter(), datas[inst].imm32) catch unreachable;
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try writer.writeLeb128(datas[inst].imm32);
inst += 1;
continue :loop tags[inst];
},
.i64_const => {
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
try writer.writeByte(@intFromEnum(std.wasm.Opcode.i64_const));
const int64: i64 = @bitCast(mir.extraData(Mir.Imm64, datas[inst].payload).data.toInt());
leb.writeIleb128(code.fixedWriter(), int64) catch unreachable;
try writer.writeLeb128(int64);
inst += 1;
continue :loop tags[inst];
@@ -330,9 +317,8 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.i64_store16,
.i64_store32,
=> {
try code.ensureUnusedCapacity(gpa, 1 + 20);
code.appendAssumeCapacity(@intFromEnum(tags[inst]));
encodeMemArg(code, mir.extraData(Mir.MemArg, datas[inst].payload).data);
try writer.writeByte(@intFromEnum(tags[inst]));
try encodeMemArg(writer, mir.extraData(Mir.MemArg, datas[inst].payload).data);
inst += 1;
continue :loop tags[inst];
},
@@ -466,43 +452,42 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.i64_clz,
.i64_ctz,
=> {
try code.append(gpa, @intFromEnum(tags[inst]));
try writer.writeByte(@intFromEnum(tags[inst]));
inst += 1;
continue :loop tags[inst];
},
.misc_prefix => {
try code.ensureUnusedCapacity(gpa, 6 + 6);
const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
switch (@as(std.wasm.MiscOpcode, @enumFromInt(opcode))) {
const opcode: std.wasm.MiscOpcode = @enumFromInt(mir.extra[extra_index]);
try writer.writeByte(@intFromEnum(std.wasm.Opcode.misc_prefix));
try writer.writeLeb128(@intFromEnum(opcode));
switch (opcode) {
// bulk-memory opcodes
.data_drop => {
const segment = mir.extra[extra_index + 1];
leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
try writer.writeLeb128(segment);
inst += 1;
continue :loop tags[inst];
},
.memory_init => {
const segment = mir.extra[extra_index + 1];
leb.writeUleb128(code.fixedWriter(), segment) catch unreachable;
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
try writer.writeLeb128(segment);
try writer.writeByte(0); // memory index
inst += 1;
continue :loop tags[inst];
},
.memory_fill => {
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // memory index
try writer.writeByte(0); // memory index
inst += 1;
continue :loop tags[inst];
},
.memory_copy => {
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // dst memory index
leb.writeUleb128(code.fixedWriter(), @as(u32, 0)) catch unreachable; // src memory index
try writer.writeByte(0); // dst memory index
try writer.writeByte(0); // src memory index
inst += 1;
continue :loop tags[inst];
@@ -534,12 +519,11 @@ pub fn lowerToCode(emit: *Emit) Error!void {
comptime unreachable;
},
.simd_prefix => {
try code.ensureUnusedCapacity(gpa, 6 + 20);
const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.simd_prefix));
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
switch (@as(std.wasm.SimdOpcode, @enumFromInt(opcode))) {
const opcode: std.wasm.SimdOpcode = @enumFromInt(mir.extra[extra_index]);
try writer.writeByte(@intFromEnum(std.wasm.Opcode.simd_prefix));
try writer.writeLeb128(@intFromEnum(opcode));
switch (opcode) {
.v128_store,
.v128_load,
.v128_load8_splat,
@@ -547,12 +531,12 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.v128_load32_splat,
.v128_load64_splat,
=> {
encodeMemArg(code, mir.extraData(Mir.MemArg, extra_index + 1).data);
try encodeMemArg(writer, mir.extraData(Mir.MemArg, extra_index + 1).data);
inst += 1;
continue :loop tags[inst];
},
.v128_const, .i8x16_shuffle => {
code.appendSliceAssumeCapacity(std.mem.asBytes(mir.extra[extra_index + 1 ..][0..4]));
try writer.writeAll(std.mem.asBytes(mir.extra[extra_index + 1 ..][0..4]));
inst += 1;
continue :loop tags[inst];
},
@@ -571,7 +555,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.f64x2_extract_lane,
.f64x2_replace_lane,
=> {
code.appendAssumeCapacity(@intCast(mir.extra[extra_index + 1]));
try writer.writeByte(@intCast(mir.extra[extra_index + 1]));
inst += 1;
continue :loop tags[inst];
},
@@ -819,13 +803,11 @@ pub fn lowerToCode(emit: *Emit) Error!void {
comptime unreachable;
},
.atomics_prefix => {
try code.ensureUnusedCapacity(gpa, 6 + 20);
const extra_index = datas[inst].payload;
const opcode = mir.extra[extra_index];
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
leb.writeUleb128(code.fixedWriter(), opcode) catch unreachable;
switch (@as(std.wasm.AtomicsOpcode, @enumFromInt(opcode))) {
const opcode: std.wasm.AtomicsOpcode = @enumFromInt(mir.extra[extra_index]);
try writer.writeByte(@intFromEnum(std.wasm.Opcode.atomics_prefix));
try writer.writeLeb128(@intFromEnum(opcode));
switch (opcode) {
.i32_atomic_load,
.i64_atomic_load,
.i32_atomic_load8_u,
@@ -892,15 +874,12 @@ pub fn lowerToCode(emit: *Emit) Error!void {
.i64_atomic_rmw32_cmpxchg_u,
=> {
const mem_arg = mir.extraData(Mir.MemArg, extra_index + 1).data;
encodeMemArg(code, mem_arg);
try encodeMemArg(writer, mem_arg);
inst += 1;
continue :loop tags[inst];
},
.atomic_fence => {
// Hard-codes memory index 0 since multi-memory proposal is
// not yet accepted nor implemented.
const memory_index: u32 = 0;
leb.writeUleb128(code.fixedWriter(), memory_index) catch unreachable;
try writer.writeByte(0); // memory index
inst += 1;
continue :loop tags[inst];
},
@@ -915,44 +894,36 @@ pub fn lowerToCode(emit: *Emit) Error!void {
}
/// Asserts 20 unused capacity.
fn encodeMemArg(code: *std.ArrayListUnmanaged(u8), mem_arg: Mir.MemArg) void {
assert(code.unusedCapacitySlice().len >= 20);
// Wasm encodes alignment as power of 2, rather than natural alignment.
const encoded_alignment = @ctz(mem_arg.alignment);
leb.writeUleb128(code.fixedWriter(), encoded_alignment) catch unreachable;
leb.writeUleb128(code.fixedWriter(), mem_arg.offset) catch unreachable;
fn encodeMemArg(writer: *Writer, mem_arg: Mir.MemArg) Writer.Error!void {
try writer.writeLeb128(Wasm.Alignment.fromNonzeroByteUnits(mem_arg.alignment).toLog2Units());
try writer.writeLeb128(mem_arg.offset);
}
fn uavRefObj(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
fn uavRefObj(wasm: *Wasm, writer: *Writer, value: InternPool.Index, offset: i32, is_wasm32: bool) Writer.Error!void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(opcode));
try writer.writeByte(@intFromEnum(opcode));
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .symbol_index = try wasm.uavSymbolIndex(value) },
.tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
.addend = offset,
});
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
try writer.splatByteAll(0, if (is_wasm32) 5 else 10);
}
fn uavRefExe(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
fn uavRefExe(wasm: *Wasm, writer: *Writer, value: InternPool.Index, offset: i32, is_wasm32: bool) !void {
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
try code.ensureUnusedCapacity(gpa, 11);
code.appendAssumeCapacity(@intFromEnum(opcode));
try writer.writeByte(@intFromEnum(opcode));
const addr = wasm.uavAddr(value);
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + offset))) catch unreachable;
try writer.writeLeb128(@as(u32, @intCast(@as(i64, addr) + offset)));
}
fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff, is_wasm32: bool) !void {
fn navRefOff(wasm: *Wasm, writer: *Writer, data: Mir.NavRefOff, is_wasm32: bool) !void {
const comp = wasm.base.comp;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
@@ -961,24 +932,22 @@ fn navRefOff(wasm: *Wasm, code: *std.ArrayListUnmanaged(u8), data: Mir.NavRefOff
const nav_ty = ip.getNav(data.nav_index).typeOf(ip);
assert(!ip.isFunctionType(nav_ty));
try code.ensureUnusedCapacity(gpa, 11);
const opcode: std.wasm.Opcode = if (is_wasm32) .i32_const else .i64_const;
code.appendAssumeCapacity(@intFromEnum(opcode));
try writer.writeByte(@intFromEnum(opcode));
if (is_obj) {
try wasm.out_relocs.append(gpa, .{
.offset = @intCast(code.items.len),
.offset = @intCast(writer.count),
.pointee = .{ .symbol_index = try wasm.navSymbolIndex(data.nav_index) },
.tag = if (is_wasm32) .memory_addr_leb else .memory_addr_leb64,
.addend = data.offset,
});
code.appendNTimesAssumeCapacity(0, if (is_wasm32) 5 else 10);
try writer.splatByteAll(0, if (is_wasm32) 5 else 10);
} else {
const addr = wasm.navAddr(data.nav_index);
leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(@as(i64, addr) + data.offset))) catch unreachable;
try writer.writeLeb128(@as(i32, @bitCast(@as(u32, @intCast(@as(i64, addr) + data.offset)))));
}
}
fn appendOutputFunctionIndex(code: *std.ArrayListUnmanaged(u8), i: Wasm.OutputFunctionIndex) void {
leb.writeUleb128(code.fixedWriter(), @intFromEnum(i)) catch unreachable;
fn appendOutputFunctionIndex(writer: *Writer, i: Wasm.OutputFunctionIndex) Writer.Error!void {
return writer.writeLeb128(@intFromEnum(i));
}
+20 -22
View File
@@ -669,16 +669,14 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.* = undefined;
}
pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) std.mem.Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
pub fn lower(mir: *const Mir, wasm: *Wasm, writer: *std.Io.Writer) std.Io.Writer.Error!void {
// Write the locals in the prologue of the function body.
try code.ensureUnusedCapacity(gpa, 5 + mir.locals.len * 6 + 38);
_ = try writer.writableSliceGreedy(5 + mir.locals.len * 6 + 38);
std.leb.writeUleb128(code.fixedWriter(), @as(u32, @intCast(mir.locals.len))) catch unreachable;
writer.writeLeb128(@as(u32, @intCast(mir.locals.len))) catch unreachable;
for (mir.locals) |local| {
std.leb.writeUleb128(code.fixedWriter(), @as(u32, 1)) catch unreachable;
code.appendAssumeCapacity(@intFromEnum(local));
writer.writeLeb128(@as(u32, 1)) catch unreachable;
writer.writeByte(@intFromEnum(local)) catch unreachable;
}
// Stack management section of function prologue.
@@ -686,37 +684,37 @@ pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) st
if (stack_alignment.toByteUnits()) |align_bytes| {
const sp_global: Wasm.GlobalIndex = .stack_pointer;
// load stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
writer.writeByte(@intFromEnum(std.wasm.Opcode.global_get)) catch unreachable;
writer.writeLeb128(@intFromEnum(sp_global)) catch unreachable;
// store stack pointer so we can restore it when we return from the function
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.fixedWriter(), mir.prologue.sp_local) catch unreachable;
writer.writeByte(@intFromEnum(std.wasm.Opcode.local_tee)) catch unreachable;
writer.writeLeb128(mir.prologue.sp_local) catch unreachable;
// get the total stack size
const aligned_stack: i32 = @intCast(stack_alignment.forward(mir.prologue.stack_size));
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.fixedWriter(), aligned_stack) catch unreachable;
writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const)) catch unreachable;
writer.writeLeb128(aligned_stack) catch unreachable;
// subtract it from the current stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_sub));
writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_sub)) catch unreachable;
// Get negative stack alignment
const neg_stack_align = @as(i32, @intCast(align_bytes)) * -1;
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(code.fixedWriter(), neg_stack_align) catch unreachable;
writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_const)) catch unreachable;
writer.writeLeb128(neg_stack_align) catch unreachable;
// Bitwise-and the value to get the new stack pointer to ensure the
// pointers are aligned with the abi alignment.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_and));
writer.writeByte(@intFromEnum(std.wasm.Opcode.i32_and)) catch unreachable;
// The bottom will be used to calculate all stack pointer offsets.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.fixedWriter(), mir.prologue.bottom_stack_local) catch unreachable;
writer.writeByte(@intFromEnum(std.wasm.Opcode.local_tee)) catch unreachable;
writer.writeLeb128(mir.prologue.bottom_stack_local) catch unreachable;
// Store the current stack pointer value into the global stack pointer so other function calls will
// start from this value instead and not overwrite the current stack.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
writer.writeByte(@intFromEnum(std.wasm.Opcode.global_set)) catch unreachable;
writer.writeLeb128(@intFromEnum(sp_global)) catch unreachable;
}
var emit: Emit = .{
.mir = mir.*,
.wasm = wasm,
.code = code,
.writer = writer,
};
try emit.lowerToCode();
}
+11 -12
View File
@@ -28,6 +28,7 @@ const fs = std.fs;
const leb = std.leb;
const log = std.log.scoped(.link);
const mem = std.mem;
const Writer = std.Io.Writer;
const Mir = @import("../arch/wasm/Mir.zig");
const CodeGen = @import("../arch/wasm/CodeGen.zig");
@@ -2087,11 +2088,9 @@ pub const Expr = enum(u32) {
pub const end = @intFromEnum(std.wasm.Opcode.end);
pub fn slice(index: Expr, wasm: *const Wasm) [:end]const u8 {
const start_slice = wasm.string_bytes.items[@intFromEnum(index)..];
const end_pos = Object.exprEndPos(start_slice, 0) catch |err| switch (err) {
error.InvalidInitOpcode => unreachable,
};
return start_slice[0..end_pos :end];
var r: std.Io.Reader = .fixed(wasm.string_bytes.items[@intFromEnum(index)..]);
Object.skipInit(&r) catch unreachable;
return r.buffered()[0 .. r.seek - 1 :end];
}
};
@@ -2126,7 +2125,7 @@ pub const FunctionType = extern struct {
wasm: *const Wasm,
ft: FunctionType,
pub fn format(self: Formatter, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Formatter, writer: *Writer) Writer.Error!void {
const params = self.ft.params.slice(self.wasm);
const returns = self.ft.returns.slice(self.wasm);
@@ -2905,7 +2904,7 @@ pub const Feature = packed struct(u8) {
@"=",
};
pub fn format(feature: Feature, writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(feature: Feature, writer: *Writer) Writer.Error!void {
try writer.print("{s} {s}", .{ @tagName(feature.prefix), @tagName(feature.tag) });
}
@@ -3037,16 +3036,16 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
const stat = try obj.file.stat();
const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
const file_contents = try gpa.alloc(u8, size);
defer gpa.free(file_contents);
var br: std.Io.Reader = .fixed(try gpa.alloc(u8, size));
defer gpa.free(br.buffered());
const n = try obj.file.preadAll(file_contents, 0);
if (n != file_contents.len) return error.UnexpectedEndOfFile;
const n = try obj.file.preadAll(br.buffered(), 0);
if (n != br.bufferedLen()) return error.UnexpectedEndOfFile;
var ss: Object.ScratchSpace = .{};
defer ss.deinit(gpa);
const object = try Object.parse(wasm, file_contents, obj.path, null, wasm.object_host_name, &ss, obj.must_link, gc_sections);
const object = try Object.parse(wasm, &br, obj.path, null, wasm.object_host_name, &ss, obj.must_link, gc_sections);
wasm.objects.appendAssumeCapacity(object);
}
+2 -3
View File
@@ -167,9 +167,8 @@ pub fn parseObject(
};
const object_file_size = try header.parsedSize();
const contents = file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size];
return Object.parse(wasm, contents, path, object_name, host_name, scratch_space, must_link, gc_sections);
var r: std.io.Reader = .fixed(file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size]);
return Object.parse(wasm, &r, path, object_name, host_name, scratch_space, must_link, gc_sections);
}
const Archive = @This();
+469 -554
View File
@@ -16,9 +16,9 @@ const build_options = @import("build_options");
const std = @import("std");
const Allocator = std.mem.Allocator;
const mem = std.mem;
const leb = std.leb;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
const Writer = std.Io.Writer;
/// Ordered list of data segments that will appear in the final binary.
/// When sorted, to-be-merged segments will be made adjacent.
@@ -557,13 +557,12 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
// Index of the data section. Used to tell relocation table where the section lives.
var data_section_index: ?u32 = null;
const binary_bytes = &f.binary_bytes;
assert(binary_bytes.items.len == 0);
assert(f.binary_bytes.items.len == 0);
var aw: Writer.Allocating = .fromArrayList(gpa, &f.binary_bytes);
defer f.binary_bytes = aw.toArrayList();
const w = &aw.writer;
try binary_bytes.appendSlice(gpa, &std.wasm.magic ++ &std.wasm.version);
assert(binary_bytes.items.len == 8);
const binary_writer = binary_bytes.writer(gpa);
try w.writeAll(&std.wasm.magic ++ &std.wasm.version);
// Type section.
for (f.function_imports.values()) |id| {
@@ -573,22 +572,18 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
try f.func_types.put(gpa, function.typeIndex(wasm), {});
}
if (f.func_types.entries.len != 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
for (f.func_types.keys()) |func_type_index| {
const func_type = func_type_index.ptr(wasm);
try leb.writeUleb128(binary_writer, std.wasm.function_type);
try w.writeLeb128(std.wasm.function_type);
const params = func_type.params.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(params.len)));
for (params) |param_ty| {
try leb.writeUleb128(binary_writer, @intFromEnum(param_ty));
}
try w.writeLeb128(params.len);
for (params) |param_ty| try w.writeLeb128(@intFromEnum(param_ty));
const returns = func_type.returns.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(returns.len)));
for (returns) |ret_ty| {
try leb.writeUleb128(binary_writer, @intFromEnum(ret_ty));
}
try w.writeLeb128(returns.len);
for (returns) |ret_ty| try w.writeLeb128(@intFromEnum(ret_ty));
}
replaceVecSectionHeader(binary_bytes, header_offset, .type, @intCast(f.func_types.entries.len));
replaceVecSectionHeader(&aw, header_offset, .type, @intCast(f.func_types.entries.len));
section_index += 1;
}
@@ -601,42 +596,42 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
// Import section
{
var total_imports: usize = 0;
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
for (f.function_imports.values()) |id| {
const module_name = id.moduleName(wasm).slice(wasm).?;
try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
try binary_writer.writeAll(module_name);
try w.writeLeb128(module_name.len);
try w.writeAll(module_name);
const name = id.importName(wasm).slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_writer.writeAll(name);
try w.writeLeb128(name.len);
try w.writeAll(name);
try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.function));
try w.writeByte(@intFromEnum(std.wasm.ExternalKind.function));
const type_index: FuncTypeIndex = .fromTypeIndex(id.functionType(wasm), f);
try leb.writeUleb128(binary_writer, @intFromEnum(type_index));
try w.writeLeb128(@intFromEnum(type_index));
}
total_imports += f.function_imports.entries.len;
for (wasm.table_imports.values()) |id| {
const table_import = id.value(wasm);
const module_name = table_import.module_name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
try binary_writer.writeAll(module_name);
try w.writeLeb128(module_name.len);
try w.writeAll(module_name);
const name = table_import.name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_writer.writeAll(name);
try w.writeLeb128(name.len);
try w.writeAll(name);
try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.table));
try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to())));
try emitLimits(gpa, binary_bytes, table_import.limits());
try w.writeByte(@intFromEnum(std.wasm.ExternalKind.table));
try w.writeLeb128(@intFromEnum(@as(std.wasm.RefType, table_import.flags.ref_type.to())));
try emitLimits(w, table_import.limits());
}
total_imports += wasm.table_imports.entries.len;
if (import_memory) {
const name = if (is_obj) wasm.preloaded_strings.__linear_memory else wasm.preloaded_strings.memory;
try emitMemoryImport(wasm, binary_bytes, name, &.{
try emitMemoryImport(wasm, w, name, &.{
// TODO the import_memory option needs to specify from which module
.module_name = wasm.object_host_name.unwrap().?,
.limits_min = wasm.memories.limits.min,
@@ -650,215 +645,209 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
for (f.global_imports.values()) |id| {
const module_name = id.moduleName(wasm).slice(wasm).?;
try leb.writeUleb128(binary_writer, @as(u32, @intCast(module_name.len)));
try binary_writer.writeAll(module_name);
try w.writeLeb128(module_name.len);
try w.writeAll(module_name);
const name = id.importName(wasm).slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_writer.writeAll(name);
try w.writeLeb128(name.len);
try w.writeAll(name);
try binary_writer.writeByte(@intFromEnum(std.wasm.ExternalKind.global));
try w.writeByte(@intFromEnum(std.wasm.ExternalKind.global));
const global_type = id.globalType(wasm);
try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.Valtype, global_type.valtype)));
try binary_writer.writeByte(@intFromBool(global_type.mutable));
try w.writeLeb128(@intFromEnum(global_type.valtype));
try w.writeByte(@intFromBool(global_type.mutable));
}
total_imports += f.global_imports.entries.len;
if (total_imports > 0) {
replaceVecSectionHeader(binary_bytes, header_offset, .import, @intCast(total_imports));
replaceVecSectionHeader(&aw, header_offset, .import, @intCast(total_imports));
section_index += 1;
} else {
binary_bytes.shrinkRetainingCapacity(header_offset);
aw.shrinkRetainingCapacity(header_offset);
}
}
// Function section
if (wasm.functions.count() != 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
for (wasm.functions.keys()) |function| {
const index: FuncTypeIndex = .fromTypeIndex(function.typeIndex(wasm), f);
try leb.writeUleb128(binary_writer, @intFromEnum(index));
try w.writeLeb128(@intFromEnum(index));
}
replaceVecSectionHeader(binary_bytes, header_offset, .function, @intCast(wasm.functions.count()));
replaceVecSectionHeader(&aw, header_offset, .function, @intCast(wasm.functions.count()));
section_index += 1;
}
// Table section
if (wasm.tables.entries.len > 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
for (wasm.tables.keys()) |table| {
try leb.writeUleb128(binary_writer, @intFromEnum(@as(std.wasm.RefType, table.refType(wasm))));
try emitLimits(gpa, binary_bytes, table.limits(wasm));
try w.writeLeb128(@intFromEnum(table.refType(wasm)));
try emitLimits(w, table.limits(wasm));
}
replaceVecSectionHeader(binary_bytes, header_offset, .table, @intCast(wasm.tables.entries.len));
replaceVecSectionHeader(&aw, header_offset, .table, @intCast(wasm.tables.entries.len));
section_index += 1;
}
// Memory section. wasm currently only supports 1 linear memory segment.
if (!import_memory) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
try emitLimits(gpa, binary_bytes, wasm.memories.limits);
replaceVecSectionHeader(binary_bytes, header_offset, .memory, 1);
const header_offset = try reserveVecSectionHeader(w);
try emitLimits(w, wasm.memories.limits);
replaceVecSectionHeader(&aw, header_offset, .memory, 1);
section_index += 1;
}
// Global section.
const globals_len: u32 = @intCast(wasm.globals.entries.len);
if (globals_len > 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
for (wasm.globals.keys()) |global_resolution| {
switch (global_resolution.unpack(wasm)) {
.unresolved => unreachable,
.__heap_base => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.heap_base),
.__heap_end => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.heap_end),
.__stack_pointer => try appendGlobal(gpa, binary_bytes, 1, virtual_addrs.stack_pointer),
.__tls_align => try appendGlobal(gpa, binary_bytes, 0, @intCast(virtual_addrs.tls_align.toByteUnits().?)),
.__tls_base => try appendGlobal(gpa, binary_bytes, 1, virtual_addrs.tls_base.?),
.__tls_size => try appendGlobal(gpa, binary_bytes, 0, virtual_addrs.tls_size.?),
.__heap_base => try appendGlobal(w, false, virtual_addrs.heap_base),
.__heap_end => try appendGlobal(w, false, virtual_addrs.heap_end),
.__stack_pointer => try appendGlobal(w, true, virtual_addrs.stack_pointer),
.__tls_align => try appendGlobal(w, false, @intCast(virtual_addrs.tls_align.toByteUnits().?)),
.__tls_base => try appendGlobal(w, true, virtual_addrs.tls_base.?),
.__tls_size => try appendGlobal(w, false, virtual_addrs.tls_size.?),
.object_global => |i| {
const global = i.ptr(wasm);
try binary_bytes.appendSlice(gpa, &.{
try w.writeAll(&.{
@intFromEnum(@as(std.wasm.Valtype, global.flags.global_type.valtype.to())),
@intFromBool(global.flags.global_type.mutable),
});
try emitExpr(wasm, binary_bytes, global.expr);
try emitExpr(wasm, w, global.expr);
},
.nav_exe => unreachable, // Zig source code currently cannot represent this.
.nav_obj => unreachable, // Zig source code currently cannot represent this.
}
}
replaceVecSectionHeader(binary_bytes, header_offset, .global, globals_len);
replaceVecSectionHeader(&aw, header_offset, .global, globals_len);
section_index += 1;
}
// Export section
{
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
var exports_len: usize = 0;
for (wasm.function_exports.keys(), wasm.function_exports.values()) |exp_name, function_index| {
const name = exp_name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.function));
try w.writeLeb128(name.len);
try w.writeAll(name);
try w.writeByte(@intFromEnum(std.wasm.ExternalKind.function));
const func_index = Wasm.OutputFunctionIndex.fromFunctionIndex(wasm, function_index);
try leb.writeUleb128(binary_writer, @intFromEnum(func_index));
try w.writeLeb128(@intFromEnum(func_index));
}
exports_len += wasm.function_exports.entries.len;
if (wasm.export_table and f.indirect_function_table.entries.len > 0) {
const name = "__indirect_function_table";
const index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.table));
try leb.writeUleb128(binary_writer, index);
try w.writeLeb128(name.len);
try w.writeAll(name);
try w.writeByte(@intFromEnum(std.wasm.ExternalKind.table));
try w.writeLeb128(index);
exports_len += 1;
}
if (export_memory) {
const name = "memory";
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.memory));
try leb.writeUleb128(binary_writer, @as(u32, 0));
try w.writeLeb128(name.len);
try w.writeAll(name);
try w.writeByte(@intFromEnum(std.wasm.ExternalKind.memory));
try w.writeUleb128(0);
exports_len += 1;
}
for (wasm.global_exports.items) |exp| {
const name = exp.name.slice(wasm);
try leb.writeUleb128(binary_writer, @as(u32, @intCast(name.len)));
try binary_bytes.appendSlice(gpa, name);
try binary_bytes.append(gpa, @intFromEnum(std.wasm.ExternalKind.global));
try leb.writeUleb128(binary_writer, @intFromEnum(exp.global_index));
try w.writeLeb128(name.len);
try w.writeAll(name);
try w.writeByte(@intFromEnum(std.wasm.ExternalKind.global));
try w.writeLeb128(@intFromEnum(exp.global_index));
}
exports_len += wasm.global_exports.items.len;
if (exports_len > 0) {
replaceVecSectionHeader(binary_bytes, header_offset, .@"export", @intCast(exports_len));
replaceVecSectionHeader(&aw, header_offset, .@"export", @intCast(exports_len));
section_index += 1;
} else {
binary_bytes.shrinkRetainingCapacity(header_offset);
aw.shrinkRetainingCapacity(header_offset);
}
}
// start section
if (wasm.functions.getIndex(.__wasm_init_memory)) |func_index| {
try emitStartSection(gpa, binary_bytes, .fromFunctionIndex(wasm, @enumFromInt(func_index)));
try emitStartSection(&aw, .fromFunctionIndex(wasm, @enumFromInt(func_index)));
} else if (Wasm.OutputFunctionIndex.fromResolution(wasm, wasm.entry_resolution)) |func_index| {
try emitStartSection(gpa, binary_bytes, func_index);
try emitStartSection(&aw, func_index);
}
// element section
if (f.indirect_function_table.entries.len > 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
// indirect function table elements
const table_index: u32 = @intCast(wasm.tables.getIndex(.__indirect_function_table).?);
// passive with implicit 0-index table or set table index manually
const flags: u32 = if (table_index == 0) 0x0 else 0x02;
try leb.writeUleb128(binary_writer, flags);
if (flags == 0x02) {
try leb.writeUleb128(binary_writer, table_index);
}
try w.writeLeb128(flags);
if (flags == 0x02) try w.writeLeb128(table_index);
// We start at index 1, so unresolved function pointers are invalid
try emitInit(binary_writer, .{ .i32_const = 1 });
if (flags == 0x02) {
try leb.writeUleb128(binary_writer, @as(u8, 0)); // represents funcref
}
try leb.writeUleb128(binary_writer, @as(u32, @intCast(f.indirect_function_table.entries.len)));
for (f.indirect_function_table.keys()) |func_index| {
try leb.writeUleb128(binary_writer, @intFromEnum(func_index));
}
try emitInit(w, .{ .i32_const = 1 });
if (flags == 0x02) try w.writeUleb128(0); // represents funcref
try w.writeLeb128(f.indirect_function_table.entries.len);
for (f.indirect_function_table.keys()) |func_index| try w.writeLeb128(@intFromEnum(func_index));
replaceVecSectionHeader(binary_bytes, header_offset, .element, 1);
replaceVecSectionHeader(&aw, header_offset, .element, 1);
section_index += 1;
}
// When the shared-memory option is enabled, we *must* emit the 'data count' section.
if (f.data_segment_groups.items.len > 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
replaceVecSectionHeader(binary_bytes, header_offset, .data_count, @intCast(f.data_segment_groups.items.len));
const header_offset = try reserveVecSectionHeader(w);
replaceVecSectionHeader(&aw, header_offset, .data_count, @intCast(f.data_segment_groups.items.len));
}
// Code section.
if (wasm.functions.count() != 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
for (wasm.functions.keys()) |resolution| switch (resolution.unpack(wasm)) {
.unresolved => unreachable,
.__wasm_apply_global_tls_relocs => @panic("TODO lower __wasm_apply_global_tls_relocs"),
.__wasm_call_ctors => {
const code_start = try reserveSize(gpa, binary_bytes);
defer replaceSize(binary_bytes, code_start);
try emitCallCtorsFunction(wasm, binary_bytes);
const code_start = try reserveSizeHeader(w);
defer replaceSizeHeader(&aw, code_start);
try emitCallCtorsFunction(wasm, w);
},
.__wasm_init_memory => {
const code_start = try reserveSize(gpa, binary_bytes);
defer replaceSize(binary_bytes, code_start);
try emitInitMemoryFunction(wasm, binary_bytes, &virtual_addrs);
const code_start = try reserveSizeHeader(w);
defer replaceSizeHeader(&aw, code_start);
try emitInitMemoryFunction(wasm, w, &virtual_addrs);
},
.__wasm_init_tls => {
const code_start = try reserveSize(gpa, binary_bytes);
defer replaceSize(binary_bytes, code_start);
try emitInitTlsFunction(wasm, binary_bytes);
const code_start = try reserveSizeHeader(w);
defer replaceSizeHeader(&aw, code_start);
try emitInitTlsFunction(wasm, w);
},
.object_function => |i| {
const ptr = i.ptr(wasm);
const code = ptr.code.slice(wasm);
try leb.writeUleb128(binary_writer, code.len);
const code_start = binary_bytes.items.len;
try binary_bytes.appendSlice(gpa, code);
if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
try w.writeLeb128(code.len);
const code_start = w.end;
try w.writeAll(code);
if (!is_obj) applyRelocs(aw.getWritten()[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
},
.zcu_func => |i| {
const code_start = try reserveSize(gpa, binary_bytes);
defer replaceSize(binary_bytes, code_start);
const code_start = try reserveSizeHeader(w);
defer replaceSizeHeader(&aw, code_start);
log.debug("lowering function code for '{s}'", .{resolution.name(wasm).?});
@@ -867,7 +856,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const ip_index = i.key(wasm).*;
switch (ip.indexToKey(ip_index)) {
.enum_type => {
try emitTagNameFunction(wasm, binary_bytes, f.data_segments.get(.__zig_tag_name_table).?, i.value(wasm).tag_name.table_index, ip_index);
try emitTagNameFunction(wasm, w, f.data_segments.get(.__zig_tag_name_table).?, i.value(wasm).tag_name.table_index, ip_index);
},
else => {
const func = i.value(wasm).function;
@@ -882,13 +871,13 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
.func_tys = undefined,
.error_name_table_ref_count = undefined,
};
try mir.lower(wasm, binary_bytes);
try mir.lower(wasm, w);
},
}
},
};
replaceVecSectionHeader(binary_bytes, header_offset, .code, @intCast(wasm.functions.entries.len));
replaceVecSectionHeader(&aw, header_offset, .code, @intCast(wasm.functions.entries.len));
code_section_index = section_index;
section_index += 1;
}
@@ -924,7 +913,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
// Data section.
if (f.data_segment_groups.items.len != 0) {
const header_offset = try reserveVecSectionHeader(gpa, binary_bytes);
const header_offset = try reserveVecSectionHeader(w);
var group_index: u32 = 0;
var segment_offset: u32 = 0;
@@ -932,7 +921,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
var group_end_addr = f.data_segment_groups.items[group_index].end_addr;
for (segment_ids, segment_vaddrs) |segment_id, segment_vaddr| {
if (segment_vaddr >= group_end_addr) {
try binary_bytes.appendNTimes(gpa, 0, group_end_addr - group_start_addr - segment_offset);
try w.splatByteAll(0, group_end_addr - group_start_addr - segment_offset);
group_index += 1;
if (group_index >= f.data_segment_groups.items.len) {
// All remaining segments are zero.
@@ -946,12 +935,10 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
const group_size = group_end_addr - group_start_addr;
log.debug("emit data section group, {d} bytes", .{group_size});
const flags: Object.DataSegmentFlags = if (segment_id.isPassive(wasm)) .passive else .active;
try leb.writeUleb128(binary_writer, @intFromEnum(flags));
try w.writeLeb128(@intFromEnum(flags));
// Passive segments are initialized at runtime.
if (flags != .passive) {
try emitInit(binary_writer, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) });
}
try leb.writeUleb128(binary_writer, group_size);
if (flags != .passive) try emitInit(w, .{ .i32_const = @as(i32, @bitCast(group_start_addr)) });
try w.writeLeb128(group_size);
}
if (segment_id.isEmpty(wasm)) {
// It counted for virtual memory but it does not go into the binary.
@@ -960,62 +947,62 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
// Padding for alignment.
const needed_offset = segment_vaddr - group_start_addr;
try binary_bytes.appendNTimes(gpa, 0, needed_offset - segment_offset);
try w.splatByteAll(0, needed_offset - segment_offset);
segment_offset = needed_offset;
const code_start = binary_bytes.items.len;
const code_start = w.end;
append: {
const code = switch (segment_id.unpack(wasm)) {
.__heap_base => {
mem.writeInt(u32, try binary_bytes.addManyAsArray(gpa, 4), virtual_addrs.heap_base, .little);
try w.writeInt(u32, virtual_addrs.heap_base, .little);
break :append;
},
.__heap_end => {
mem.writeInt(u32, try binary_bytes.addManyAsArray(gpa, 4), virtual_addrs.heap_end, .little);
try w.writeInt(u32, virtual_addrs.heap_end, .little);
break :append;
},
.__zig_error_names => {
try binary_bytes.appendSlice(gpa, wasm.error_name_bytes.items);
try w.writeAll(wasm.error_name_bytes.items);
break :append;
},
.__zig_error_name_table => {
if (is_obj) @panic("TODO error name table reloc");
const base = f.data_segments.get(.__zig_error_names).?;
if (!is64) {
try emitTagNameTable(gpa, binary_bytes, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u32);
try emitTagNameTable(w, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u32);
} else {
try emitTagNameTable(gpa, binary_bytes, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u64);
try emitTagNameTable(w, wasm.error_name_offs.items, wasm.error_name_bytes.items, base, u64);
}
break :append;
},
.__zig_tag_names => {
try binary_bytes.appendSlice(gpa, wasm.tag_name_bytes.items);
try w.writeAll(wasm.tag_name_bytes.items);
break :append;
},
.__zig_tag_name_table => {
if (is_obj) @panic("TODO tag name table reloc");
const base = f.data_segments.get(.__zig_tag_names).?;
if (!is64) {
try emitTagNameTable(gpa, binary_bytes, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u32);
try emitTagNameTable(w, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u32);
} else {
try emitTagNameTable(gpa, binary_bytes, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u64);
try emitTagNameTable(w, wasm.tag_name_offs.items, wasm.tag_name_bytes.items, base, u64);
}
break :append;
},
.object => |i| {
const ptr = i.ptr(wasm);
try binary_bytes.appendSlice(gpa, ptr.payload.slice(wasm));
if (!is_obj) applyRelocs(binary_bytes.items[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
try w.writeAll(ptr.payload.slice(wasm));
if (!is_obj) applyRelocs(aw.getWritten()[code_start..], ptr.offset, ptr.relocations(wasm), wasm);
break :append;
},
inline .uav_exe, .uav_obj, .nav_exe, .nav_obj => |i| i.value(wasm).code,
};
try binary_bytes.appendSlice(gpa, code.slice(wasm));
try w.writeAll(code.slice(wasm));
}
segment_offset += @intCast(binary_bytes.items.len - code_start);
segment_offset += @intCast(w.end - code_start);
}
replaceVecSectionHeader(binary_bytes, header_offset, .data, @intCast(f.data_segment_groups.items.len));
replaceVecSectionHeader(&aw, header_offset, .data, @intCast(f.data_segment_groups.items.len));
data_section_index = section_index;
section_index += 1;
}
@@ -1023,7 +1010,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
if (is_obj) {
@panic("TODO emit link section for object file and emit modified relocations");
} else if (comp.config.debug_format != .strip) {
try emitNameSection(wasm, f.data_segment_groups.items, binary_bytes);
try emitNameSection(wasm, &aw, f.data_segment_groups.items);
}
if (comp.config.debug_format != .strip) {
@@ -1033,17 +1020,17 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
.none => {},
.fast => {
var id: [16]u8 = undefined;
std.crypto.hash.sha3.TurboShake128(null).hash(binary_bytes.items, &id, .{});
std.crypto.hash.sha3.TurboShake128(null).hash(w.buffered(), &id, .{});
var uuid: [36]u8 = undefined;
_ = try std.fmt.bufPrint(&uuid, "{x}-{x}-{x}-{x}-{x}", .{
id[0..4], id[4..6], id[6..8], id[8..10], id[10..],
});
try emitBuildIdSection(gpa, binary_bytes, &uuid);
try emitBuildIdSection(&aw, &uuid);
},
.hexstring => |hs| {
var buffer: [32 * 2]u8 = undefined;
const str = std.fmt.bufPrint(&buffer, "{x}", .{hs.toSlice()}) catch unreachable;
try emitBuildIdSection(gpa, binary_bytes, str);
try emitBuildIdSection(&aw, str);
},
else => |mode| {
var err = try diags.addErrorWithNotes(0);
@@ -1054,14 +1041,17 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
var debug_bytes = std.ArrayList(u8).init(gpa);
defer debug_bytes.deinit();
try emitProducerSection(gpa, binary_bytes);
try emitFeaturesSection(gpa, binary_bytes, target);
try emitProducerSection(&aw);
emitFeaturesSection(&aw, target) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
}
// Finally, write the entire binary into the file.
const file = wasm.base.file.?;
try file.pwriteAll(binary_bytes.items, 0);
try file.setEndPos(binary_bytes.items.len);
const contents = aw.getWritten();
try file.setEndPos(contents.len);
try file.pwriteAll(contents, 0);
}
const VirtualAddrs = struct {
@@ -1076,170 +1066,155 @@ const VirtualAddrs = struct {
/// Emits the custom "name" section, which maps function, global, and data
/// segment indices to human-readable names for debuggers and disassemblers.
/// Indices cover imports first, then locally defined entities, matching the
/// wasm index spaces. The section size is back-patched via the deferred
/// `replaceSectionHeader` calls once each (sub)section's contents are written.
fn emitNameSection(
    wasm: *Wasm,
    aw: *Writer.Allocating,
    data_segment_groups: []const DataSegmentGroup,
) !void {
    const f = &wasm.flush_buffer;
    const w = &aw.writer;
    const header_offset = try reserveSectionHeader(w);
    defer replaceSectionHeader(aw, header_offset, @intFromEnum(std.wasm.Section.custom));
    const section_name = "name";
    try w.writeLeb128(section_name.len);
    try w.writeAll(section_name);
    // Function names subsection: imports occupy the low indices, followed by
    // locally defined functions.
    {
        const sub_header_offset = try reserveSectionHeader(w);
        defer replaceSectionHeader(aw, sub_header_offset, @intFromEnum(std.wasm.NameSubsection.function));
        try w.writeLeb128(f.function_imports.entries.len + wasm.functions.entries.len);
        for (f.function_imports.keys(), 0..) |name_index, function_index| {
            const name = name_index.slice(wasm);
            try w.writeLeb128(function_index);
            try w.writeLeb128(name.len);
            try w.writeAll(name);
        }
        for (wasm.functions.keys(), f.function_imports.entries.len..) |resolution, function_index| {
            const name = resolution.name(wasm).?;
            try w.writeLeb128(function_index);
            try w.writeLeb128(name.len);
            try w.writeAll(name);
        }
    }
    // Global names subsection, same import-then-local index ordering.
    {
        const sub_header_offset = try reserveSectionHeader(w);
        defer replaceSectionHeader(aw, sub_header_offset, @intFromEnum(std.wasm.NameSubsection.global));
        try w.writeLeb128(f.global_imports.entries.len + wasm.globals.entries.len);
        for (f.global_imports.keys(), 0..) |name_index, global_index| {
            const name = name_index.slice(wasm);
            try w.writeLeb128(global_index);
            try w.writeLeb128(name.len);
            try w.writeAll(name);
        }
        for (wasm.globals.keys(), f.global_imports.entries.len..) |resolution, global_index| {
            const name = resolution.name(wasm).?;
            try w.writeLeb128(global_index);
            try w.writeLeb128(name.len);
            try w.writeAll(name);
        }
    }
    // Data segment names subsection: one entry per emitted segment group,
    // named after the group's first segment (suffix stripped).
    {
        const sub_header_offset = try reserveSectionHeader(w);
        defer replaceSectionHeader(aw, sub_header_offset, @intFromEnum(std.wasm.NameSubsection.data_segment));
        try w.writeLeb128(data_segment_groups.len);
        for (data_segment_groups, 0..) |group, group_index| {
            const name, _ = splitSegmentName(group.first_segment.name(wasm));
            try w.writeLeb128(group_index);
            try w.writeLeb128(name.len);
            try w.writeAll(name);
        }
    }
}
/// Emits the custom "target_features" section listing every enabled wasm CPU
/// feature, each prefixed with '+'. Emits nothing when no features are set.
/// The section size is back-patched by the deferred `replaceSectionHeader`.
fn emitFeaturesSection(aw: *Writer.Allocating, target: *const std.Target) !void {
    const feature_count = target.cpu.features.count();
    if (feature_count == 0) return;

    const w = &aw.writer;
    const header_offset = try reserveSectionHeader(w);
    defer replaceSectionHeader(aw, header_offset, @intFromEnum(std.wasm.Section.custom));

    const section_name = "target_features";
    try w.writeLeb128(section_name.len);
    try w.writeAll(section_name);
    try w.writeLeb128(feature_count);
    // Track how many features we actually emit so a mismatch with
    // `feature_count` is caught in safe builds.
    var safety_count = feature_count;
    for (target.cpu.arch.allFeaturesList(), 0..) |*feature, i| {
        if (!target.cpu.has(.wasm, @as(std.Target.wasm.Feature, @enumFromInt(i)))) continue;
        safety_count -= 1;
        try w.writeUleb128('+');
        // Depends on llvm_name for the hyphenated version that matches wasm tooling conventions.
        const name = feature.llvm_name.?;
        try w.writeLeb128(name.len);
        try w.writeAll(name);
    }
    assert(safety_count == 0);
}
/// Emits the custom "build_id" section containing a single length-prefixed
/// build id string (the leading 1 is the number of build ids in the section).
fn emitBuildIdSection(aw: *Writer.Allocating, build_id: []const u8) !void {
    const w = &aw.writer;
    const header_offset = try reserveSectionHeader(w);
    defer replaceSectionHeader(aw, header_offset, @intFromEnum(std.wasm.Section.custom));

    const section_name = "build_id";
    try w.writeLeb128(section_name.len);
    try w.writeAll(section_name);

    try w.writeUleb128(1);
    try w.writeLeb128(build_id.len);
    try w.writeAll(build_id);
}
/// Emits the custom "producers" section identifying the toolchain: a
/// "language" field and a "processed-by" field, both recording Zig and the
/// compiler version from build options.
fn emitProducerSection(aw: *Writer.Allocating) !void {
    const w = &aw.writer;
    const header_offset = try reserveSectionHeader(w);
    defer replaceSectionHeader(aw, header_offset, @intFromEnum(std.wasm.Section.custom));

    const section_name = "producers";
    try w.writeLeb128(section_name.len);
    try w.writeAll(section_name);

    try w.writeUleb128(2); // 2 fields: language + processed-by

    // language field
    {
        const field_name = "language";
        try w.writeLeb128(field_name.len);
        try w.writeAll(field_name);

        // field_value_count (TODO: Parse object files for producer sections to detect their language)
        try w.writeUleb128(1);

        // versioned name
        {
            const field_value = "Zig";
            try w.writeLeb128(field_value.len);
            try w.writeAll(field_value);

            try w.writeLeb128(build_options.version.len);
            try w.writeAll(build_options.version);
        }
    }

    // processed-by field
    {
        const field_name = "processed-by";
        try w.writeLeb128(field_name.len);
        try w.writeAll(field_name);

        // field_value_count (TODO: Parse object files for producer sections to detect other used tools)
        try w.writeUleb128(1);

        // versioned name
        {
            const field_value = "Zig";
            try w.writeLeb128(field_value.len);
            try w.writeAll(field_value);

            try w.writeLeb128(build_options.version.len);
            try w.writeAll(build_options.version);
        }
    }
}
@@ -1277,170 +1252,130 @@ fn wantSegmentMerge(
}
/// section id + fixed leb contents size + fixed leb vector length
const vec_section_header_size = section_header_size + size_header_size;

/// Reserves space in `w` for a vector section header (id byte, 5-byte fixed
/// LEB size, 5-byte fixed LEB element count) and returns the offset of the
/// reserved region, to be filled in later by `replaceVecSectionHeader`.
fn reserveVecSectionHeader(w: *Writer) Writer.Error!u32 {
    const offset = w.end;
    _ = try w.writableSlice(vec_section_header_size);
    return @intCast(offset);
}
/// Back-patches a header previously reserved by `reserveVecSectionHeader`:
/// writes the section id, the section size (bytes following the size field,
/// i.e. everything written since the reservation minus id and size bytes),
/// and the element count, using fixed-width LEB so the reserved space is
/// filled exactly.
fn replaceVecSectionHeader(
    aw: *Writer.Allocating,
    offset: u32,
    section: std.wasm.Section,
    n_items: u32,
) void {
    const header = aw.getWritten()[offset..][0..vec_section_header_size];
    header[0] = @intFromEnum(section);
    std.leb.writeUnsignedFixed(5, header[1..6], @intCast(aw.writer.end - offset - section_header_size));
    std.leb.writeUnsignedFixed(5, header[6..], n_items);
}
/// Legacy ArrayList-based variant: reserves space for a section header
/// (id byte + fixed-width LEB128 size) and returns its offset.
fn reserveCustomSectionHeader(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
    try bytes.appendNTimes(gpa, 0, section_header_size);
    return @intCast(bytes.items.len - section_header_size);
}
/// section id byte + fixed-width LEB128 contents size
const section_header_size = 1 + size_header_size;
/// Reserves space in `w` for a section header (id + size), to be
/// back-patched by `replaceSectionHeader`. Returns the header's offset.
fn reserveSectionHeader(w: *Writer) Writer.Error!u32 {
    const offset = w.end;
    _ = try w.writableSlice(section_header_size);
    return @intCast(offset);
}
/// Legacy ArrayList-based variant: back-patches a reserved header as a
/// 'custom' (id 0) section.
fn writeCustomSectionHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
    return replaceHeader(bytes, offset, 0); // 0 = 'custom' section
}
/// Back-patches a section header previously reserved with
/// `reserveSectionHeader`, writing the section id and the fixed-width
/// LEB128 size of everything written after the header.
fn replaceSectionHeader(aw: *Writer.Allocating, offset: u32, section: u8) void {
    const header = aw.getWritten()[offset..][0..section_header_size];
    header[0] = section;
    std.leb.writeUnsignedFixed(5, header[1..6], @intCast(aw.writer.end - offset - section_header_size));
}
/// Legacy ArrayList-based variant: back-patches the reserved header span
/// with `tag` followed by the ULEB128-encoded size, compacting the list
/// when the real encoding is shorter than the reservation.
fn replaceHeader(bytes: *std.ArrayListUnmanaged(u8), offset: u32, tag: u8) void {
    const size: u32 = @intCast(bytes.items.len - offset - section_header_size);
    var buf: [section_header_size]u8 = undefined;
    var fbw = std.io.fixedBufferStream(&buf);
    const w = fbw.writer();
    w.writeByte(tag) catch unreachable;
    leb.writeUleb128(w, size) catch unreachable;
    bytes.replaceRangeAssumeCapacity(offset, section_header_size, fbw.getWritten());
}
/// Byte width of a fixed-width LEB128 u32 size field.
const size_header_size = 5;
/// Reserves a fixed-width size field in `w`, to be back-patched by
/// `replaceSizeHeader`. Returns the stream offset of the reserved field.
fn reserveSizeHeader(w: *Writer) Writer.Error!u32 {
    const offset = w.end;
    _ = try w.writableSlice(size_header_size);
    return @intCast(offset);
}
/// Maximum byte length of a ULEB128-encoded u32.
const max_size_encoding = 5;
/// Legacy ArrayList-based variant: reserves a size field and returns its offset.
fn reserveSize(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!u32 {
    try bytes.appendNTimes(gpa, 0, max_size_encoding);
    return @intCast(bytes.items.len - max_size_encoding);
}
/// Back-patches a size field previously reserved with `reserveSizeHeader`
/// with the fixed-width LEB128 byte count of everything written after it.
fn replaceSizeHeader(aw: *Writer.Allocating, offset: u32) void {
    const header = aw.getWritten()[offset..][0..size_header_size];
    std.leb.writeUnsignedFixed(5, header[0..5], @intCast(aw.writer.end - offset - size_header_size));
}
/// Back-patches a reserved size field with the ULEB128-encoded byte count of
/// everything appended after it. Note: `replaceRangeAssumeCapacity` swaps the
/// 5 reserved bytes for the real (possibly shorter) encoding, shifting the
/// tail of the list left — the list may shrink.
fn replaceSize(bytes: *std.ArrayListUnmanaged(u8), offset: u32) void {
    const size: u32 = @intCast(bytes.items.len - offset - max_size_encoding);
    var buf: [max_size_encoding]u8 = undefined;
    var fbw = std.io.fixedBufferStream(&buf);
    // 5 bytes always suffice for a ULEB128 u32, hence the unreachable catch.
    leb.writeUleb128(fbw.writer(), size) catch unreachable;
    bytes.replaceRangeAssumeCapacity(offset, max_size_encoding, fbw.getWritten());
}
/// Emits a wasm limits structure: flags byte, ULEB128 minimum, and — only
/// when the `has_max` flag is set — the ULEB128 maximum.
fn emitLimits(w: *Writer, limits: std.wasm.Limits) Writer.Error!void {
    try w.writeByte(@bitCast(limits.flags));
    try w.writeLeb128(limits.min);
    if (limits.flags.has_max) try w.writeLeb128(limits.max);
}
/// Emits a memory import entry: ULEB128-length-prefixed module name and
/// field name, the `memory` external kind byte, then the import's limits.
fn emitMemoryImport(
    wasm: *Wasm,
    w: *Writer,
    name_index: String,
    memory_import: *const Wasm.MemoryImport,
) Writer.Error!void {
    const module_name = memory_import.module_name.slice(wasm);
    try w.writeLeb128(module_name.len);
    try w.writeAll(module_name);

    const name = name_index.slice(wasm);
    try w.writeLeb128(name.len);
    try w.writeAll(name);

    try w.writeByte(@intFromEnum(std.wasm.ExternalKind.memory));
    try emitLimits(w, memory_import.limits());
}
/// Emits a constant initializer expression followed by the `end` opcode.
/// Integer immediates are LEB128-encoded (signedness taken from the payload
/// type, so i32/i64 consts encode signed and global indices unsigned);
/// float immediates are written as little-endian raw bits.
pub fn emitInit(w: *Writer, init_expr: std.wasm.InitExpression) Writer.Error!void {
    switch (init_expr) {
        inline else => |val, tag| {
            // InitExpression tag names match the corresponding opcode names.
            try w.writeByte(@intFromEnum(@field(std.wasm.Opcode, @tagName(tag))));
            switch (@typeInfo(@TypeOf(val))) {
                .int => try w.writeLeb128(val),
                .float => |float| try w.writeInt(
                    @Type(.{ .int = .{ .signedness = .unsigned, .bits = float.bits } }),
                    @bitCast(val),
                    .little,
                ),
                else => comptime unreachable,
            }
        },
    }
    try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
/// Copies a pre-encoded constant expression into the output, including its
/// trailing `end` opcode.
pub fn emitExpr(wasm: *const Wasm, w: *Writer, expr: Wasm.Expr) Writer.Error!void {
    const slice = expr.slice(wasm);
    try w.writeAll(slice[0 .. slice.len + 1]); // +1 to include end opcode
}
/// Emits the linking metadata "segment_info" subsection: subsection id,
/// contents size (back-patched after writing), segment count, then each
/// segment's length-prefixed name, log2 alignment, and flags.
/// Takes `*Writer.Allocating` (not `*Writer`) because the size header is
/// patched in place via `replaceSectionHeader` after the contents exist.
fn emitSegmentInfo(wasm: *Wasm, aw: *Writer.Allocating) Writer.Error!void {
    const w = &aw.writer;
    const header_offset = try reserveSectionHeader(w);
    defer replaceSectionHeader(aw, header_offset, @intFromEnum(Wasm.SubsectionType.segment_info));
    try w.writeLeb128(wasm.segment_info.count());
    for (wasm.segment_info.values()) |segment_info| {
        log.debug("Emit segment: {s} align({d}) flags({b})", .{
            segment_info.name,
            segment_info.alignment,
            segment_info.flags,
        });
        try w.writeLeb128(segment_info.name.len);
        try w.writeAll(segment_info.name);
        try w.writeLeb128(segment_info.alignment.toLog2Units());
        try w.writeLeb128(segment_info.flags);
    }
}
/// Counts the 7-bit groups needed to ULEB128-encode `x`.
/// NOTE(review): yields 0 for x == 0, although encoding 0 emits one byte —
/// callers appear to rely on this exact behavior, so it is preserved.
fn uleb128size(x: u32) u32 {
    var remaining = x;
    var group_count: u32 = 0;
    while (remaining != 0) {
        remaining >>= 7;
        group_count += 1;
    }
    return group_count;
}
/// Writes a table of (pointer, length) pairs for enum tag names, one pair
/// per entry in `tag_name_offs`. `base` is the virtual address of
/// `tag_name_bytes`; `Int` selects the target pointer width.
fn emitTagNameTable(
    w: *Writer,
    tag_name_offs: []const u32,
    tag_name_bytes: []const u8,
    base: u32,
    comptime Int: type,
) Writer.Error!void {
    for (tag_name_offs) |off| {
        // Each name is null-terminated within tag_name_bytes.
        const name_len: u32 = @intCast(mem.indexOfScalar(u8, tag_name_bytes[off..], 0).?);
        try w.writeInt(Int, base + off, .little);
        try w.writeInt(Int, name_len, .little);
    }
}
@@ -1525,11 +1460,11 @@ fn reloc_u64_table_index(code: []u8, i: IndirectFunctionTableIndex) void {
}
/// Back-patches a 5-byte fixed-width signed LEB128 table index relocation.
fn reloc_sleb_table_index(code: []u8, i: IndirectFunctionTableIndex) void {
    std.leb.writeSignedFixed(5, code[0..5], i.toAbi());
}
/// Back-patches an 11-byte fixed-width signed LEB128 table index relocation.
fn reloc_sleb64_table_index(code: []u8, i: IndirectFunctionTableIndex) void {
    std.leb.writeSignedFixed(11, code[0..11], i.toAbi());
}
fn reloc_u32_function(code: []u8, function: Wasm.OutputFunctionIndex) void {
@@ -1537,7 +1472,7 @@ fn reloc_u32_function(code: []u8, function: Wasm.OutputFunctionIndex) void {
}
/// Back-patches a 5-byte fixed-width unsigned LEB128 function index relocation.
fn reloc_leb_function(code: []u8, function: Wasm.OutputFunctionIndex) void {
    std.leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(function));
}
fn reloc_u32_global(code: []u8, global: Wasm.GlobalIndex) void {
@@ -1545,7 +1480,7 @@ fn reloc_u32_global(code: []u8, global: Wasm.GlobalIndex) void {
}
/// Back-patches a 5-byte fixed-width unsigned LEB128 global index relocation.
fn reloc_leb_global(code: []u8, global: Wasm.GlobalIndex) void {
    std.leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(global));
}
const RelocAddr = struct {
@@ -1581,35 +1516,31 @@ fn reloc_u64_addr(code: []u8, ra: RelocAddr) void {
}
/// Back-patches a 5-byte fixed-width unsigned LEB128 address relocation.
fn reloc_leb_addr(code: []u8, ra: RelocAddr) void {
    std.leb.writeUnsignedFixed(5, code[0..5], ra.addr);
}
/// Back-patches an 11-byte fixed-width unsigned LEB128 address relocation.
fn reloc_leb64_addr(code: []u8, ra: RelocAddr) void {
    std.leb.writeUnsignedFixed(11, code[0..11], ra.addr);
}
/// Back-patches a 5-byte fixed-width signed LEB128 address relocation.
fn reloc_sleb_addr(code: []u8, ra: RelocAddr) void {
    std.leb.writeSignedFixed(5, code[0..5], ra.addr);
}
/// Back-patches an 11-byte fixed-width signed LEB128 address relocation.
fn reloc_sleb64_addr(code: []u8, ra: RelocAddr) void {
    std.leb.writeSignedFixed(11, code[0..11], ra.addr);
}
/// Back-patches a 5-byte fixed-width unsigned LEB128 table index relocation.
fn reloc_leb_table(code: []u8, table: Wasm.TableIndex) void {
    std.leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(table));
}
/// Back-patches a 5-byte fixed-width unsigned LEB128 function type index relocation.
fn reloc_leb_type(code: []u8, index: FuncTypeIndex) void {
    std.leb.writeUnsignedFixed(5, code[0..5], @intFromEnum(index));
}
fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
const gpa = wasm.base.comp.gpa;
try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1);
appendReservedUleb32(binary_bytes, 0); // no locals
fn emitCallCtorsFunction(wasm: *const Wasm, w: *Writer) Writer.Error!void {
try w.writeUleb128(0); // no locals
for (wasm.object_init_funcs.items) |init_func| {
const func = init_func.function_index.ptr(wasm);
if (!func.object_index.ptr(wasm).is_included) continue;
@@ -1617,25 +1548,18 @@ fn emitCallCtorsFunction(wasm: *const Wasm, binary_bytes: *std.ArrayListUnmanage
const n_returns = ty.returns.slice(wasm).len;
// Call function by its function index
try binary_bytes.ensureUnusedCapacity(gpa, 1 + 5 + n_returns + 1);
const call_index: Wasm.OutputFunctionIndex = .fromObjectFunction(wasm, init_func.function_index);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
appendReservedUleb32(binary_bytes, @intFromEnum(call_index));
try w.writeByte(@intFromEnum(std.wasm.Opcode.call));
try w.writeLeb128(@intFromEnum(call_index));
// drop all returned values from the stack as __wasm_call_ctors has no return value
binary_bytes.appendNTimesAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop), n_returns);
try w.splatByteAll(@intFromEnum(std.wasm.Opcode.drop), n_returns);
}
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end function body
try w.writeByte(@intFromEnum(std.wasm.Opcode.end)); // end function body
}
fn emitInitMemoryFunction(
wasm: *const Wasm,
binary_bytes: *std.ArrayListUnmanaged(u8),
virtual_addrs: *const VirtualAddrs,
) Allocator.Error!void {
fn emitInitMemoryFunction(wasm: *const Wasm, w: *Writer, virtual_addrs: *const VirtualAddrs) Writer.Error!void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const shared_memory = comp.config.shared_memory;
// Passive segments are used to avoid memory being reinitialized on each
@@ -1645,39 +1569,40 @@ fn emitInitMemoryFunction(
// function.
assert(wasm.any_passive_inits);
try binary_bytes.ensureUnusedCapacity(gpa, 5 + 1);
appendReservedUleb32(binary_bytes, 0); // no locals
try w.writeUleb128(0); // no locals
if (virtual_addrs.init_memory_flag) |flag_address| {
assert(shared_memory);
try binary_bytes.ensureUnusedCapacity(gpa, 2 * 3 + 6 * 3 + 1 + 6 * 3 + 1 + 5 * 4 + 1 + 1);
// destination blocks
// based on values we jump to corresponding label
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $drop
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $wait
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block)); // $init
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
try w.writeAll(&.{
@intFromEnum(std.wasm.Opcode.block), // $drop
@intFromEnum(std.wasm.BlockType.empty),
@intFromEnum(std.wasm.Opcode.block), // $wait
@intFromEnum(std.wasm.BlockType.empty),
@intFromEnum(std.wasm.Opcode.block), // $init
@intFromEnum(std.wasm.BlockType.empty),
});
// atomically check
appendReservedI32Const(binary_bytes, flag_address);
appendReservedI32Const(binary_bytes, 0);
appendReservedI32Const(binary_bytes, 1);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_rmw_cmpxchg));
appendReservedUleb32(binary_bytes, 2); // alignment
appendReservedUleb32(binary_bytes, 0); // offset
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(flag_address)));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeSleb128(0);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeSleb128(1);
try w.writeByte(@intFromEnum(std.wasm.Opcode.atomics_prefix));
try w.writeLeb128(@intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_rmw_cmpxchg));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeUleb128(0); // offset
// based on the value from the atomic check, jump to the label.
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_table));
appendReservedUleb32(binary_bytes, 2); // length of the table (we have 3 blocks but because of the mandatory default the length is 2).
appendReservedUleb32(binary_bytes, 0); // $init
appendReservedUleb32(binary_bytes, 1); // $wait
appendReservedUleb32(binary_bytes, 2); // $drop
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
try w.writeByte(@intFromEnum(std.wasm.Opcode.br_table));
try w.writeUleb128(3 - 1); // length of the table (we have 3 blocks but because of the mandatory default the length is 2).
try w.writeUleb128(0); // $init
try w.writeUleb128(1); // $wait
try w.writeUleb128(2); // $drop
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
const segment_groups = wasm.flush_buffer.data_segment_groups.items;
@@ -1690,74 +1615,82 @@ fn emitInitMemoryFunction(
const start_addr: u32 = @intCast(segment.alignment(wasm).forward(prev_end));
const segment_size: u32 = group.end_addr - start_addr;
try binary_bytes.ensureUnusedCapacity(gpa, 6 + 6 + 1 + 5 + 6 + 6 + 1 + 6 * 2 + 1 + 1);
// For passive BSS segments we can simply issue a memory.fill(0). For
// non-BSS segments we do a memory.init. Both instructions take as
// their first argument the destination address.
appendReservedI32Const(binary_bytes, start_addr);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(start_addr)));
if (shared_memory and segment.isTls(wasm)) {
// When we initialize the TLS segment we also set the `__tls_base`
// global. This allows the runtime to use this static copy of the
// TLS data for the first/main thread.
appendReservedI32Const(binary_bytes, start_addr);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
appendReservedUleb32(binary_bytes, virtual_addrs.tls_base.?);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(start_addr)));
try w.writeByte(@intFromEnum(std.wasm.Opcode.global_set));
try w.writeLeb128(virtual_addrs.tls_base.?);
}
appendReservedI32Const(binary_bytes, 0);
appendReservedI32Const(binary_bytes, segment_size);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeSleb128(0);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(segment_size)));
try w.writeByte(@intFromEnum(std.wasm.Opcode.misc_prefix));
if (segment.isBss(wasm)) {
// fill bss segment with zeroes
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.memory_fill));
try w.writeLeb128(@intFromEnum(std.wasm.MiscOpcode.memory_fill));
} else {
// initialize the segment
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.memory_init));
appendReservedUleb32(binary_bytes, @intCast(segment_index));
try w.writeLeb128(@intFromEnum(std.wasm.MiscOpcode.memory_init));
try w.writeLeb128(segment_index);
}
binary_bytes.appendAssumeCapacity(0); // memory index immediate
try w.writeByte(0); // memory index immediate
}
if (virtual_addrs.init_memory_flag) |flag_address| {
assert(shared_memory);
try binary_bytes.ensureUnusedCapacity(gpa, 6 + 6 + 1 + 3 * 5 + 6 + 1 + 5 + 1 + 3 * 5 + 1 + 1 + 5 + 1 + 6 * 2 + 1 + 5 + 1 + 3 * 5 + 1 + 1 + 1);
// we set the init memory flag to value '2'
appendReservedI32Const(binary_bytes, flag_address);
appendReservedI32Const(binary_bytes, 2);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_store));
appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(flag_address)));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeSleb128(2);
try w.writeByte(@intFromEnum(std.wasm.Opcode.atomics_prefix));
try w.writeLeb128(@intFromEnum(std.wasm.AtomicsOpcode.i32_atomic_store));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeUleb128(0); // offset
// notify any waiters for segment initialization completion
appendReservedI32Const(binary_bytes, flag_address);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(binary_bytes.fixedWriter(), @as(i32, -1)) catch unreachable; // number of waiters
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_notify));
appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(flag_address)));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeSleb128(-1); // number of waiters
try w.writeByte(@intFromEnum(std.wasm.Opcode.atomics_prefix));
try w.writeLeb128(@intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_notify));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeUleb128(0); // offset
try w.writeByte(@intFromEnum(std.wasm.Opcode.drop));
// branch and drop segments
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br));
appendReservedUleb32(binary_bytes, @as(u32, 1));
try w.writeByte(@intFromEnum(std.wasm.Opcode.br));
try w.writeUleb128(1);
// wait for thread to initialize memory segments
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end $wait
appendReservedI32Const(binary_bytes, flag_address);
appendReservedI32Const(binary_bytes, 1); // expected flag value
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
leb.writeIleb128(binary_bytes.fixedWriter(), @as(i64, -1)) catch unreachable; // timeout
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.atomics_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_wait32));
appendReservedUleb32(binary_bytes, @as(u32, 2)); // alignment
appendReservedUleb32(binary_bytes, @as(u32, 0)); // offset
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.drop));
try w.writeByte(@intFromEnum(std.wasm.Opcode.end)); // end $wait
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(flag_address)));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeSleb128(1); // expected flag value
try w.writeByte(@intFromEnum(std.wasm.Opcode.i64_const));
try w.writeSleb128(-1); // timeout
try w.writeByte(@intFromEnum(std.wasm.Opcode.atomics_prefix));
try w.writeByte(@intFromEnum(std.wasm.AtomicsOpcode.memory_atomic_wait32));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeUleb128(0); // offset
try w.writeByte(@intFromEnum(std.wasm.Opcode.drop));
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end)); // end $drop
try w.writeByte(@intFromEnum(std.wasm.Opcode.end)); // end $drop
}
for (segment_groups, 0..) |group, segment_index| {
@@ -1768,26 +1701,20 @@ fn emitInitMemoryFunction(
// during the initialization of each thread (__wasm_init_tls).
if (shared_memory and segment.isTls(wasm)) continue;
try binary_bytes.ensureUnusedCapacity(gpa, 1 + 5 + 5 + 1);
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
appendReservedUleb32(binary_bytes, @intFromEnum(std.wasm.MiscOpcode.data_drop));
appendReservedUleb32(binary_bytes, @intCast(segment_index));
try w.writeByte(@intFromEnum(std.wasm.Opcode.misc_prefix));
try w.writeLeb128(@intFromEnum(std.wasm.MiscOpcode.data_drop));
try w.writeLeb128(segment_index);
}
// End of the function body
binary_bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
fn emitInitTlsFunction(wasm: *const Wasm, w: *Writer) Writer.Error!void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
assert(comp.config.shared_memory);
try bytes.ensureUnusedCapacity(gpa, 5 * 10 + 8);
appendReservedUleb32(bytes, 0); // no locals
try w.writeUleb128(0); // no locals
// If there's a TLS segment, initialize it during runtime using the bulk-memory feature
// TLS segment is always the first one due to how we sort the data segments.
@@ -1796,36 +1723,35 @@ fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Al
const start_addr = wasm.flush_buffer.data_segments.values()[0];
const end_addr = wasm.flush_buffer.data_segment_groups.items[0].end_addr;
const group_size = end_addr - start_addr;
const data_segment_index = 0;
const data_segment_index: u32 = 0;
const param_local: u32 = 0;
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(bytes, param_local);
try w.writeByte(@intFromEnum(std.wasm.Opcode.local_get));
try w.writeLeb128(param_local);
const tls_base_global_index: Wasm.GlobalIndex = @enumFromInt(wasm.globals.getIndex(.__tls_base).?);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
appendReservedUleb32(bytes, @intFromEnum(tls_base_global_index));
try w.writeByte(@intFromEnum(std.wasm.Opcode.global_set));
try w.writeLeb128(@intFromEnum(tls_base_global_index));
// load stack values for the bulk-memory operation
{
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(bytes, param_local);
try w.writeByte(@intFromEnum(std.wasm.Opcode.local_get));
try w.writeLeb128(param_local);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
appendReservedUleb32(bytes, 0); //segment offset
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeSleb128(0); // segment offset
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
appendReservedUleb32(bytes, group_size); //segment offset
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(group_size))); // segment offset
}
// perform the bulk-memory operation to initialize the data segment
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.misc_prefix));
appendReservedUleb32(bytes, @intFromEnum(std.wasm.MiscOpcode.memory_init));
try w.writeByte(@intFromEnum(std.wasm.Opcode.misc_prefix));
try w.writeLeb128(@intFromEnum(std.wasm.MiscOpcode.memory_init));
// segment immediate
appendReservedUleb32(bytes, data_segment_index);
// memory index immediate (always 0)
appendReservedUleb32(bytes, 0);
try w.writeLeb128(data_segment_index);
try w.writeByte(0); // memory index immediate
}
// If we have to perform any TLS relocations, call the corresponding function
@@ -1833,56 +1759,59 @@ fn emitInitTlsFunction(wasm: *const Wasm, bytes: *std.ArrayListUnmanaged(u8)) Al
// generated by the linker.
if (wasm.functions.getIndex(.__wasm_apply_global_tls_relocs)) |function_index| {
const output_function_index: Wasm.OutputFunctionIndex = .fromFunctionIndex(wasm, @enumFromInt(function_index));
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.call));
appendReservedUleb32(bytes, @intFromEnum(output_function_index));
try w.writeByte(@intFromEnum(std.wasm.Opcode.call));
try w.writeLeb128(@intFromEnum(output_function_index));
}
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
/// Emits the start section. The vector-section helpers are reused here with
/// the function index standing in for the element count, since the start
/// section's sole payload is a single ULEB128 function index.
fn emitStartSection(aw: *Writer.Allocating, i: Wasm.OutputFunctionIndex) !void {
    const header_offset = try reserveVecSectionHeader(&aw.writer);
    defer replaceVecSectionHeader(aw, header_offset, .start, @intFromEnum(i));
}
fn emitTagNameFunction(
wasm: *Wasm,
code: *std.ArrayListUnmanaged(u8),
w: *Writer,
table_base_addr: u32,
table_index: u32,
enum_type_ip: InternPool.Index,
) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_type_ip);
const tag_values = enum_type.values.get(ip);
try code.ensureUnusedCapacity(gpa, 7 * 5 + 6 + 1 * 6);
appendReservedUleb32(code, 0); // no locals
try w.writeUleb128(0); // no locals
const slice_abi_size = 8;
const encoded_alignment = @ctz(@as(u32, 4));
const slice_abi_size: u32 = 8;
if (tag_values.len == 0) {
// Then it's auto-numbered and therefore a direct table lookup.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 0);
try w.writeByte(@intFromEnum(std.wasm.Opcode.local_get));
try w.writeUleb128(0);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 1);
try w.writeByte(@intFromEnum(std.wasm.Opcode.local_get));
try w.writeUleb128(1);
appendReservedI32Const(code, slice_abi_size);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_mul));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
if (std.math.isPowerOfTwo(slice_abi_size)) {
try w.writeLeb128(@as(i32, @bitCast(@as(u32, std.math.log2_int(u32, slice_abi_size)))));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_shl));
} else {
try w.writeLeb128(@as(i32, @bitCast(slice_abi_size)));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_mul));
}
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load));
appendReservedUleb32(code, encoded_alignment);
appendReservedUleb32(code, table_base_addr + table_index * 8);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i64_load));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeLeb128(table_base_addr + slice_abi_size * table_index);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_store));
appendReservedUleb32(code, encoded_alignment);
appendReservedUleb32(code, 0);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i64_store));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeUleb128(0);
} else {
const int_info = Zcu.Type.intInfo(.fromInterned(enum_type.tag_ty), zcu);
const outer_block_type: std.wasm.BlockType = switch (int_info.bits) {
@@ -1891,94 +1820,80 @@ fn emitTagNameFunction(
else => return diags.fail("wasm linker does not yet implement @tagName for sparse enums with more than 64 bit integer tag types", .{}),
};
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 0);
try w.writeByte(@intFromEnum(std.wasm.Opcode.local_get));
try w.writeUleb128(0);
// Outer block that computes table offset.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
code.appendAssumeCapacity(@intFromEnum(outer_block_type));
try w.writeByte(@intFromEnum(std.wasm.Opcode.block));
try w.writeByte(@intFromEnum(outer_block_type));
for (tag_values, 0..) |tag_value, tag_index| {
// block for this if case
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
code.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
try w.writeByte(@intFromEnum(std.wasm.Opcode.block));
try w.writeByte(@intFromEnum(std.wasm.BlockType.empty));
// Tag value whose name should be returned.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 1);
try w.writeByte(@intFromEnum(std.wasm.Opcode.local_get));
try w.writeUleb128(1);
const val: Zcu.Value = .fromInterned(tag_value);
switch (outer_block_type) {
.i32 => {
const x: u32 = switch (int_info.signedness) {
.signed => @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))),
.unsigned => @intCast(val.toUnsignedInt(zcu)),
};
appendReservedI32Const(code, x);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_ne));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, switch (int_info.signedness) {
.signed => @intCast(val.toSignedInt(zcu)),
.unsigned => @bitCast(@as(u32, @intCast(val.toUnsignedInt(zcu)))),
}));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_ne));
},
.i64 => {
const x: u64 = switch (int_info.signedness) {
.signed => @bitCast(val.toSignedInt(zcu)),
.unsigned => val.toUnsignedInt(zcu),
};
appendReservedI64Const(code, x);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i64_const));
try w.writeLeb128(@as(i64, switch (int_info.signedness) {
.signed => val.toSignedInt(zcu),
.unsigned => @bitCast(val.toUnsignedInt(zcu)),
}));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i64_ne));
},
else => unreachable,
}
// if they're not equal, break out of current branch
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if));
appendReservedUleb32(code, 0);
try w.writeByte(@intFromEnum(std.wasm.Opcode.br_if));
try w.writeUleb128(0);
// Put the table offset of the result on the stack.
appendReservedI32Const(code, @intCast(tag_index * slice_abi_size));
try w.writeByte(@intFromEnum(std.wasm.Opcode.i32_const));
try w.writeLeb128(@as(i32, @bitCast(@as(u32, @intCast(slice_abi_size * tag_index)))));
// break outside blocks
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br));
appendReservedUleb32(code, 1);
try w.writeByte(@intFromEnum(std.wasm.Opcode.br));
try w.writeUleb128(1);
// end the block for this case
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.@"unreachable"));
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
try w.writeByte(@intFromEnum(std.wasm.Opcode.@"unreachable"));
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load));
appendReservedUleb32(code, encoded_alignment);
appendReservedUleb32(code, table_base_addr + table_index * 8);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i64_load));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeLeb128(table_base_addr + slice_abi_size * table_index);
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_store));
appendReservedUleb32(code, encoded_alignment);
appendReservedUleb32(code, 0);
try w.writeByte(@intFromEnum(std.wasm.Opcode.i64_store));
try w.writeLeb128(comptime Alignment.@"4".toLog2Units());
try w.writeUleb128(0);
}
// End of the function body
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
/// Writes an unsigned 32-bit integer as a LEB128-encoded 'i32.const' value.
fn appendReservedI32Const(bytes: *std.ArrayListUnmanaged(u8), val: u32) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
leb.writeIleb128(bytes.fixedWriter(), @as(i32, @bitCast(val))) catch unreachable;
}
/// Writes an unsigned 64-bit integer as a LEB128-encoded 'i64.const' value.
fn appendReservedI64Const(bytes: *std.ArrayListUnmanaged(u8), val: u64) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
leb.writeIleb128(bytes.fixedWriter(), @as(i64, @bitCast(val))) catch unreachable;
}
fn appendReservedUleb32(bytes: *std.ArrayListUnmanaged(u8), val: u32) void {
leb.writeUleb128(bytes.fixedWriter(), val) catch unreachable;
}
fn appendGlobal(gpa: Allocator, bytes: *std.ArrayListUnmanaged(u8), mutable: u8, val: u32) Allocator.Error!void {
try bytes.ensureUnusedCapacity(gpa, 9);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Valtype.i32));
bytes.appendAssumeCapacity(mutable);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
appendReservedUleb32(bytes, val);
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.end));
fn appendGlobal(w: *Writer, mutable: bool, val: u32) Writer.Error!void {
try w.writeAll(&.{
@intFromEnum(std.wasm.Valtype.i32),
@intFromBool(mutable),
@intFromEnum(std.wasm.Opcode.i32_const),
});
try w.writeLeb128(val);
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
+268 -321
View File
@@ -8,6 +8,7 @@ const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
const log = std.log.scoped(.object);
const assert = std.debug.assert;
const Reader = std.Io.Reader;
/// Wasm spec version used for this `Object`
version: u32,
@@ -252,25 +253,21 @@ pub const ScratchSpace = struct {
pub fn parse(
wasm: *Wasm,
bytes: []const u8,
br: *Reader,
path: Path,
archive_member_name: ?[]const u8,
host_name: Wasm.OptionalString,
ss: *ScratchSpace,
must_link: bool,
gc_sections: bool,
) anyerror!Object {
) !Object {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
var pos: usize = 0;
if (!std.mem.eql(u8, try br.takeArray(std.wasm.magic.len), &std.wasm.magic)) return error.BadObjectMagic;
if (!std.mem.eql(u8, bytes[0..std.wasm.magic.len], &std.wasm.magic)) return error.BadObjectMagic;
pos += std.wasm.magic.len;
const version = std.mem.readInt(u32, bytes[pos..][0..4], .little);
pos += 4;
const version = try br.takeInt(u32, .little);
const data_segment_start: u32 = @intCast(wasm.object_data_segments.items.len);
const custom_segment_start: u32 = @intCast(wasm.object_custom_segments.entries.len);
@@ -298,200 +295,187 @@ pub fn parse(
var code_section_index: ?Wasm.ObjectSectionIndex = null;
var global_section_index: ?Wasm.ObjectSectionIndex = null;
var data_section_index: ?Wasm.ObjectSectionIndex = null;
while (pos < bytes.len) : (wasm.object_total_sections += 1) {
while (br.takeEnum(std.wasm.Section, .little)) |section_tag| : (wasm.object_total_sections += 1) {
const section_index: Wasm.ObjectSectionIndex = @enumFromInt(wasm.object_total_sections);
const section_tag: std.wasm.Section = @enumFromInt(bytes[pos]);
pos += 1;
const len, pos = readLeb(u32, bytes, pos);
const section_end = pos + len;
const len = try br.takeLeb128(u32);
const section_end = br.seek + len;
switch (section_tag) {
.custom => {
const section_name, pos = readBytes(bytes, pos);
const section_name = try br.take(try br.takeLeb128(u32));
if (std.mem.eql(u8, section_name, "linking")) {
saw_linking_section = true;
const section_version, pos = readLeb(u32, bytes, pos);
const section_version = try br.takeLeb128(u32);
log.debug("link meta data version: {d}", .{section_version});
if (section_version != 2) return error.UnsupportedVersion;
while (pos < section_end) {
const sub_type, pos = readLeb(u8, bytes, pos);
log.debug("found subsection: {s}", .{@tagName(@as(SubsectionType, @enumFromInt(sub_type)))});
const payload_len, pos = readLeb(u32, bytes, pos);
while (br.seek < section_end) {
const sub_type = try br.takeEnum(SubsectionType, .little);
log.debug("found subsection: {s}", .{@tagName(sub_type)});
const payload_len = try br.takeLeb128(u32);
if (payload_len == 0) break;
const count, pos = readLeb(u32, bytes, pos);
switch (@as(SubsectionType, @enumFromInt(sub_type))) {
.segment_info => {
for (try ss.segment_info.addManyAsSlice(gpa, count)) |*segment| {
const name, pos = readBytes(bytes, pos);
const alignment, pos = readLeb(u32, bytes, pos);
const flags_u32, pos = readLeb(u32, bytes, pos);
const flags: SegmentInfo.Flags = @bitCast(flags_u32);
const tls = flags.tls or
// Supports legacy object files that specified
// being TLS by the name instead of the TLS flag.
std.mem.startsWith(u8, name, ".tdata") or
std.mem.startsWith(u8, name, ".tbss");
has_tls = has_tls or tls;
segment.* = .{
.name = try wasm.internString(name),
.flags = .{
.strings = flags.strings,
.tls = tls,
.alignment = @enumFromInt(alignment),
.retain = flags.retain,
},
};
}
const count = try br.takeLeb128(u32);
switch (sub_type) {
.segment_info => for (try ss.segment_info.addManyAsSlice(gpa, count)) |*segment| {
const name = try br.take(try br.takeLeb128(u32));
const alignment: Alignment = .fromLog2Units(try br.takeLeb128(u32));
const flags: SegmentInfo.Flags = @bitCast(try br.takeLeb128(u32));
const tls = flags.tls or
// Supports legacy object files that specified
// being TLS by the name instead of the TLS flag.
std.mem.startsWith(u8, name, ".tdata") or
std.mem.startsWith(u8, name, ".tbss");
has_tls = has_tls or tls;
segment.* = .{
.name = try wasm.internString(name),
.flags = .{
.strings = flags.strings,
.tls = tls,
.alignment = alignment,
.retain = flags.retain,
},
};
},
.init_funcs => {
for (try wasm.object_init_funcs.addManyAsSlice(gpa, count)) |*func| {
const priority, pos = readLeb(u32, bytes, pos);
const symbol_index, pos = readLeb(u32, bytes, pos);
if (symbol_index > ss.symbol_table.items.len)
return diags.failParse(path, "init_funcs before symbol table", .{});
const sym = &ss.symbol_table.items[symbol_index];
if (sym.pointee != .function) {
return diags.failParse(path, "init_func symbol '{s}' not a function", .{
sym.name.slice(wasm).?,
});
} else if (sym.flags.undefined) {
return diags.failParse(path, "init_func symbol '{s}' is an import", .{
sym.name.slice(wasm).?,
});
}
func.* = .{
.priority = priority,
.function_index = sym.pointee.function,
};
.init_funcs => for (try wasm.object_init_funcs.addManyAsSlice(gpa, count)) |*func| {
const priority = try br.takeLeb128(u32);
const symbol_index = try br.takeLeb128(u32);
if (symbol_index > ss.symbol_table.items.len)
return diags.failParse(path, "init_funcs before symbol table", .{});
const sym = &ss.symbol_table.items[symbol_index];
if (sym.pointee != .function) {
return diags.failParse(path, "init_func symbol '{s}' not a function", .{
sym.name.slice(wasm).?,
});
} else if (sym.flags.undefined) {
return diags.failParse(path, "init_func symbol '{s}' is an import", .{
sym.name.slice(wasm).?,
});
}
func.* = .{
.priority = priority,
.function_index = sym.pointee.function,
};
},
.comdat_info => {
for (try wasm.object_comdats.addManyAsSlice(gpa, count)) |*comdat| {
const name, pos = readBytes(bytes, pos);
const flags, pos = readLeb(u32, bytes, pos);
if (flags != 0) return error.UnexpectedComdatFlags;
const symbol_count, pos = readLeb(u32, bytes, pos);
const start_off: u32 = @intCast(wasm.object_comdat_symbols.len);
try wasm.object_comdat_symbols.ensureUnusedCapacity(gpa, symbol_count);
for (0..symbol_count) |_| {
const kind, pos = readEnum(Wasm.Comdat.Symbol.Type, bytes, pos);
const index, pos = readLeb(u32, bytes, pos);
if (true) @panic("TODO rebase index depending on kind");
wasm.object_comdat_symbols.appendAssumeCapacity(.{
.kind = kind,
.index = index,
});
}
comdat.* = .{
.name = try wasm.internString(name),
.flags = flags,
.symbols = .{
.off = start_off,
.len = @intCast(wasm.object_comdat_symbols.len - start_off),
},
};
.comdat_info => for (try wasm.object_comdats.addManyAsSlice(gpa, count)) |*comdat| {
const name = try br.take(try br.takeLeb128(u32));
const flags = try br.takeLeb128(u32);
if (flags != 0) return error.UnexpectedComdatFlags;
const symbol_count = try br.takeLeb128(u32);
const start_off: u32 = @intCast(wasm.object_comdat_symbols.len);
try wasm.object_comdat_symbols.ensureUnusedCapacity(gpa, symbol_count);
for (0..symbol_count) |_| {
const kind = try br.takeEnum(Wasm.Comdat.Symbol.Type, .little);
const index = try br.takeLeb128(u32);
if (true) @panic("TODO rebase index depending on kind");
wasm.object_comdat_symbols.appendAssumeCapacity(.{
.kind = kind,
.index = index,
});
}
comdat.* = .{
.name = try wasm.internString(name),
.flags = flags,
.symbols = .{
.off = start_off,
.len = @intCast(wasm.object_comdat_symbols.len - start_off),
},
};
},
.symbol_table => {
for (try ss.symbol_table.addManyAsSlice(gpa, count)) |*symbol| {
const tag, pos = readEnum(Symbol.Tag, bytes, pos);
const flags, pos = readLeb(u32, bytes, pos);
symbol.* = .{
.flags = @bitCast(flags),
.name = .none,
.pointee = undefined,
};
symbol.flags.initZigSpecific(must_link, gc_sections);
.symbol_table => for (try ss.symbol_table.addManyAsSlice(gpa, count)) |*symbol| {
const tag = try br.takeEnum(Symbol.Tag, .little);
const flags: Wasm.SymbolFlags = @bitCast(try br.takeLeb128(u32));
symbol.* = .{
.flags = flags,
.name = .none,
.pointee = undefined,
};
symbol.flags.initZigSpecific(must_link, gc_sections);
switch (tag) {
.data => {
const name, pos = readBytes(bytes, pos);
const interned_name = try wasm.internString(name);
symbol.name = interned_name.toOptional();
if (symbol.flags.undefined) {
symbol.pointee = .data_import;
} else {
const segment_index, pos = readLeb(u32, bytes, pos);
const segment_offset, pos = readLeb(u32, bytes, pos);
const size, pos = readLeb(u32, bytes, pos);
try wasm.object_datas.append(gpa, .{
.segment = @enumFromInt(data_segment_start + segment_index),
.offset = segment_offset,
.size = size,
.name = interned_name,
.flags = symbol.flags,
});
symbol.pointee = .{
.data = @enumFromInt(wasm.object_datas.items.len - 1),
};
}
},
.section => {
const local_section, pos = readLeb(u32, bytes, pos);
const section: Wasm.ObjectSectionIndex = @enumFromInt(local_section_index_base + local_section);
symbol.pointee = .{ .section = section };
},
switch (tag) {
.data => {
const name = try br.take(try br.takeLeb128(u32));
const interned_name = try wasm.internString(name);
symbol.name = interned_name.toOptional();
if (symbol.flags.undefined) {
symbol.pointee = .data_import;
} else {
const segment_index = try br.takeLeb128(u32);
const segment_offset = try br.takeLeb128(u32);
const size = try br.takeLeb128(u32);
try wasm.object_datas.append(gpa, .{
.segment = @enumFromInt(data_segment_start + segment_index),
.offset = segment_offset,
.size = size,
.name = interned_name,
.flags = symbol.flags,
});
symbol.pointee = .{
.data = @enumFromInt(wasm.object_datas.items.len - 1),
};
}
},
.section => {
const local_section = try br.takeLeb128(u32);
const section: Wasm.ObjectSectionIndex = @enumFromInt(local_section_index_base + local_section);
symbol.pointee = .{ .section = section };
},
.function => {
const local_index, pos = readLeb(u32, bytes, pos);
if (symbol.flags.undefined) {
const function_import: ScratchSpace.FuncImportIndex = @enumFromInt(local_index);
symbol.pointee = .{ .function_import = function_import };
if (symbol.flags.explicit_name) {
const name, pos = readBytes(bytes, pos);
symbol.name = (try wasm.internString(name)).toOptional();
} else {
symbol.name = function_import.ptr(ss).name.toOptional();
}
} else {
symbol.pointee = .{ .function = @enumFromInt(functions_start + (local_index - ss.func_imports.items.len)) };
const name, pos = readBytes(bytes, pos);
.function => {
const local_index = try br.takeLeb128(u32);
if (symbol.flags.undefined) {
const function_import: ScratchSpace.FuncImportIndex = @enumFromInt(local_index);
symbol.pointee = .{ .function_import = function_import };
if (symbol.flags.explicit_name) {
const name = try br.take(try br.takeLeb128(u32));
symbol.name = (try wasm.internString(name)).toOptional();
}
},
.global => {
const local_index, pos = readLeb(u32, bytes, pos);
if (symbol.flags.undefined) {
const global_import: ScratchSpace.GlobalImportIndex = @enumFromInt(local_index);
symbol.pointee = .{ .global_import = global_import };
if (symbol.flags.explicit_name) {
const name, pos = readBytes(bytes, pos);
symbol.name = (try wasm.internString(name)).toOptional();
} else {
symbol.name = global_import.ptr(ss).name.toOptional();
}
} else {
symbol.pointee = .{ .global = @enumFromInt(globals_start + (local_index - ss.global_imports.items.len)) };
const name, pos = readBytes(bytes, pos);
symbol.name = (try wasm.internString(name)).toOptional();
symbol.name = function_import.ptr(ss).name.toOptional();
}
},
.table => {
const local_index, pos = readLeb(u32, bytes, pos);
if (symbol.flags.undefined) {
table_import_symbol_count += 1;
const table_import: ScratchSpace.TableImportIndex = @enumFromInt(local_index);
symbol.pointee = .{ .table_import = table_import };
if (symbol.flags.explicit_name) {
const name, pos = readBytes(bytes, pos);
symbol.name = (try wasm.internString(name)).toOptional();
} else {
symbol.name = table_import.ptr(ss).name.toOptional();
}
} else {
symbol.pointee = .{ .function = @enumFromInt(functions_start + (local_index - ss.func_imports.items.len)) };
const name = try br.take(try br.takeLeb128(u32));
symbol.name = (try wasm.internString(name)).toOptional();
}
},
.global => {
const local_index = try br.takeLeb128(u32);
if (symbol.flags.undefined) {
const global_import: ScratchSpace.GlobalImportIndex = @enumFromInt(local_index);
symbol.pointee = .{ .global_import = global_import };
if (symbol.flags.explicit_name) {
const name = try br.take(try br.takeLeb128(u32));
symbol.name = (try wasm.internString(name)).toOptional();
} else {
symbol.pointee = .{ .table = @enumFromInt(tables_start + (local_index - ss.table_imports.items.len)) };
const name, pos = readBytes(bytes, pos);
symbol.name = (try wasm.internString(name)).toOptional();
symbol.name = global_import.ptr(ss).name.toOptional();
}
},
else => {
log.debug("unrecognized symbol type tag: {x}", .{@intFromEnum(tag)});
return error.UnrecognizedSymbolType;
},
}
} else {
symbol.pointee = .{ .global = @enumFromInt(globals_start + (local_index - ss.global_imports.items.len)) };
const name = try br.take(try br.takeLeb128(u32));
symbol.name = (try wasm.internString(name)).toOptional();
}
},
.table => {
const local_index = try br.takeLeb128(u32);
if (symbol.flags.undefined) {
table_import_symbol_count += 1;
const table_import: ScratchSpace.TableImportIndex = @enumFromInt(local_index);
symbol.pointee = .{ .table_import = table_import };
if (symbol.flags.explicit_name) {
const name = try br.take(try br.takeLeb128(u32));
symbol.name = (try wasm.internString(name)).toOptional();
} else {
symbol.name = table_import.ptr(ss).name.toOptional();
}
} else {
symbol.pointee = .{ .table = @enumFromInt(tables_start + (local_index - ss.table_imports.items.len)) };
const name = try br.take(try br.takeLeb128(u32));
symbol.name = (try wasm.internString(name)).toOptional();
}
},
else => {
log.debug("unrecognized symbol type tag: {x}", .{@intFromEnum(tag)});
return error.UnrecognizedSymbolType;
},
}
},
}
@@ -504,8 +488,8 @@ pub fn parse(
// which section they apply to, and must be sequenced in
// the module after that section."
// "Relocation sections can only target code, data and custom sections."
const local_section, pos = readLeb(u32, bytes, pos);
const count, pos = readLeb(u32, bytes, pos);
const local_section = try br.takeLeb128(u32);
const count = try br.takeLeb128(u32);
const section: Wasm.ObjectSectionIndex = @enumFromInt(local_section_index_base + local_section);
log.debug("found {d} relocations for section={d}", .{ count, section });
@@ -513,10 +497,9 @@ pub fn parse(
var prev_offset: u32 = 0;
try wasm.object_relocations.ensureUnusedCapacity(gpa, count);
for (0..count) |_| {
const tag: RelocationType = @enumFromInt(bytes[pos]);
pos += 1;
const offset, pos = readLeb(u32, bytes, pos);
const index, pos = readLeb(u32, bytes, pos);
const tag = try br.takeEnum(RelocationType, .little);
const offset = try br.takeLeb128(u32);
const index = try br.takeLeb128(u32);
if (offset < prev_offset)
return diags.failParse(path, "relocation entries not sorted by offset", .{});
@@ -537,7 +520,7 @@ pub fn parse(
.memory_addr_locrel_i32,
.memory_addr_tls_sleb64,
=> {
const addend: i32, pos = readLeb(i32, bytes, pos);
const addend = try br.takeLeb128(i32);
wasm.object_relocations.appendAssumeCapacity(switch (sym.pointee) {
.data => |data| .{
.tag = .fromType(tag),
@@ -555,7 +538,7 @@ pub fn parse(
});
},
.function_offset_i32, .function_offset_i64 => {
const addend: i32, pos = readLeb(i32, bytes, pos);
const addend = try br.takeLeb128(i32);
wasm.object_relocations.appendAssumeCapacity(switch (sym.pointee) {
.function => .{
.tag = .fromType(tag),
@@ -573,7 +556,7 @@ pub fn parse(
});
},
.section_offset_i32 => {
const addend: i32, pos = readLeb(i32, bytes, pos);
const addend = try br.takeLeb128(i32);
wasm.object_relocations.appendAssumeCapacity(.{
.tag = .section_offset_i32,
.offset = offset,
@@ -658,10 +641,9 @@ pub fn parse(
.len = count,
});
} else if (std.mem.eql(u8, section_name, "target_features")) {
opt_features, pos = try parseFeatures(wasm, bytes, pos, path);
opt_features = try parseFeatures(wasm, br, path);
} else if (std.mem.startsWith(u8, section_name, ".debug")) {
const debug_content = bytes[pos..section_end];
pos = section_end;
const debug_content = try br.take(len);
const data_off: u32 = @intCast(wasm.string_bytes.items.len);
try wasm.string_bytes.appendSlice(gpa, debug_content);
@@ -669,23 +651,20 @@ pub fn parse(
try wasm.object_custom_segments.put(gpa, section_index, .{
.payload = .{
.off = @enumFromInt(data_off),
.len = @intCast(debug_content.len),
.len = @intCast(len),
},
.flags = .{},
.section_name = try wasm.internString(section_name),
});
} else {
pos = section_end;
}
} else br.seek = section_end;
},
.type => {
const func_types_len, pos = readLeb(u32, bytes, pos);
const func_types_len = try br.takeLeb128(u32);
for (try ss.func_types.addManyAsSlice(gpa, func_types_len)) |*func_type| {
if (bytes[pos] != std.wasm.function_type) return error.ExpectedFuncType;
pos += 1;
if (try br.takeByte() != std.wasm.function_type) return error.ExpectedFuncType;
const params, pos = readBytes(bytes, pos);
const returns, pos = readBytes(bytes, pos);
const params = try br.take(try br.takeLeb128(u32));
const returns = try br.take(try br.takeLeb128(u32));
func_type.* = try wasm.addFuncType(.{
.params = .fromString(try wasm.internString(params)),
.returns = .fromString(try wasm.internString(returns)),
@@ -693,16 +672,16 @@ pub fn parse(
}
},
.import => {
const imports_len, pos = readLeb(u32, bytes, pos);
const imports_len = try br.takeLeb128(u32);
for (0..imports_len) |_| {
const module_name, pos = readBytes(bytes, pos);
const name, pos = readBytes(bytes, pos);
const kind, pos = readEnum(std.wasm.ExternalKind, bytes, pos);
const module_name = try br.take(try br.takeLeb128(u32));
const name = try br.take(try br.takeLeb128(u32));
const kind = try br.takeEnum(std.wasm.ExternalKind, .little);
const interned_module_name = try wasm.internString(module_name);
const interned_name = try wasm.internString(name);
switch (kind) {
.function => {
const function, pos = readLeb(u32, bytes, pos);
const function = try br.takeLeb128(u32);
try ss.func_imports.append(gpa, .{
.module_name = interned_module_name,
.name = interned_name,
@@ -710,7 +689,7 @@ pub fn parse(
});
},
.memory => {
const limits, pos = readLimits(bytes, pos);
const limits = try readLimits(br);
const gop = try wasm.object_memory_imports.getOrPut(gpa, interned_name);
if (gop.found_existing) {
if (gop.value_ptr.module_name != interned_module_name) {
@@ -736,9 +715,12 @@ pub fn parse(
}
},
.global => {
const valtype, pos = readEnum(std.wasm.Valtype, bytes, pos);
const mutable = bytes[pos] == 0x01;
pos += 1;
const valtype = try br.takeEnum(std.wasm.Valtype, .little);
const mutable = switch (try br.takeByte()) {
0 => false,
1 => true,
else => return error.InvalidMutability,
};
try ss.global_imports.append(gpa, .{
.name = interned_name,
.valtype = valtype,
@@ -747,8 +729,8 @@ pub fn parse(
});
},
.table => {
const ref_type, pos = readEnum(std.wasm.RefType, bytes, pos);
const limits, pos = readLimits(bytes, pos);
const ref_type = try br.takeEnum(std.wasm.RefType, .little);
const limits = try readLimits(br);
try ss.table_imports.append(gpa, .{
.name = interned_name,
.module_name = interned_module_name,
@@ -763,17 +745,16 @@ pub fn parse(
}
},
.function => {
const functions_len, pos = readLeb(u32, bytes, pos);
const functions_len = try br.takeLeb128(u32);
for (try ss.func_type_indexes.addManyAsSlice(gpa, functions_len)) |*func_type_index| {
const i, pos = readLeb(u32, bytes, pos);
func_type_index.* = @enumFromInt(i);
func_type_index.* = @enumFromInt(try br.takeLeb128(u32));
}
},
.table => {
const tables_len, pos = readLeb(u32, bytes, pos);
const tables_len = try br.takeLeb128(u32);
for (try wasm.object_tables.addManyAsSlice(gpa, tables_len)) |*table| {
const ref_type, pos = readEnum(std.wasm.RefType, bytes, pos);
const limits, pos = readLimits(bytes, pos);
const ref_type = try br.takeEnum(std.wasm.RefType, .little);
const limits = try readLimits(br);
table.* = .{
.name = .none,
.module_name = .none,
@@ -788,9 +769,9 @@ pub fn parse(
}
},
.memory => {
const memories_len, pos = readLeb(u32, bytes, pos);
const memories_len = try br.takeLeb128(u32);
for (try wasm.object_memories.addManyAsSlice(gpa, memories_len)) |*memory| {
const limits, pos = readLimits(bytes, pos);
const limits = try readLimits(br);
memory.* = .{
.name = .none,
.flags = .{
@@ -807,14 +788,17 @@ pub fn parse(
return diags.failParse(path, "object has more than one global section", .{});
global_section_index = section_index;
const section_start = pos;
const globals_len, pos = readLeb(u32, bytes, pos);
const section_start = br.seek;
const globals_len = try br.takeLeb128(u32);
for (try wasm.object_globals.addManyAsSlice(gpa, globals_len)) |*global| {
const valtype, pos = readEnum(std.wasm.Valtype, bytes, pos);
const mutable = bytes[pos] == 0x01;
pos += 1;
const init_start = pos;
const expr, pos = try readInit(wasm, bytes, pos);
const valtype = try br.takeEnum(std.wasm.Valtype, .little);
const mutable = switch (try br.takeByte()) {
0 => false,
1 => true,
else => return error.InvalidMutability,
};
const init_start = br.seek;
const expr = try readInit(wasm, br);
global.* = .{
.name = .none,
.flags = .{
@@ -826,20 +810,19 @@ pub fn parse(
.expr = expr,
.object_index = object_index,
.offset = @intCast(init_start - section_start),
.size = @intCast(pos - init_start),
.size = @intCast(br.seek - init_start),
};
}
},
.@"export" => {
const exports_len, pos = readLeb(u32, bytes, pos);
const exports_len = try br.takeLeb128(u32);
// Read into scratch space, and then later add this data as if
// it were extra symbol table entries, but allow merging with
// existing symbol table data if the name matches.
for (try ss.exports.addManyAsSlice(gpa, exports_len)) |*exp| {
const name, pos = readBytes(bytes, pos);
const kind: std.wasm.ExternalKind = @enumFromInt(bytes[pos]);
pos += 1;
const index, pos = readLeb(u32, bytes, pos);
const name = try br.take(try br.takeLeb128(u32));
const kind = try br.takeEnum(std.wasm.ExternalKind, .little);
const index = try br.takeLeb128(u32);
exp.* = .{
.name = try wasm.internString(name),
.pointee = switch (kind) {
@@ -852,25 +835,24 @@ pub fn parse(
}
},
.start => {
const index, pos = readLeb(u32, bytes, pos);
const index = try br.takeLeb128(u32);
start_function = @enumFromInt(functions_start + index);
},
.element => {
log.warn("unimplemented: element section in {f} {?s}", .{ path, archive_member_name });
pos = section_end;
br.seek = section_end;
},
.code => {
if (code_section_index != null)
return diags.failParse(path, "object has more than one code section", .{});
code_section_index = section_index;
const start = pos;
const count, pos = readLeb(u32, bytes, pos);
const start = br.seek;
const count = try br.takeLeb128(u32);
for (try wasm.object_functions.addManyAsSlice(gpa, count)) |*elem| {
const code_len, pos = readLeb(u32, bytes, pos);
const offset: u32 = @intCast(pos - start);
const payload = try wasm.addRelocatableDataPayload(bytes[pos..][0..code_len]);
pos += code_len;
const code_len = try br.takeLeb128(u32);
const offset: u32 = @intCast(br.seek - start);
const payload = try wasm.addRelocatableDataPayload(try br.take(code_len));
elem.* = .{
.flags = .{}, // populated from symbol table
.name = .none, // populated from symbol table
@@ -886,20 +868,19 @@ pub fn parse(
return diags.failParse(path, "object has more than one data section", .{});
data_section_index = section_index;
const section_start = pos;
const count, pos = readLeb(u32, bytes, pos);
const section_start = br.seek;
const count = try br.takeLeb128(u32);
for (try wasm.object_data_segments.addManyAsSlice(gpa, count)) |*elem| {
const flags, pos = readEnum(DataSegmentFlags, bytes, pos);
const flags: DataSegmentFlags = @enumFromInt(try br.takeLeb128(u32));
if (flags == .active_memidx) {
const memidx, pos = readLeb(u32, bytes, pos);
const memidx = try br.takeLeb128(u32);
if (memidx != 0) return diags.failParse(path, "data section uses mem index {d}", .{memidx});
}
//const expr, pos = if (flags != .passive) try readInit(wasm, bytes, pos) else .{ .none, pos };
if (flags != .passive) pos = try skipInit(bytes, pos);
const data_len, pos = readLeb(u32, bytes, pos);
const segment_start = pos;
const payload = try wasm.addRelocatableDataPayload(bytes[pos..][0..data_len]);
pos += data_len;
//const expr = if (flags != .passive) try readInit(wasm, br) else .none;
if (flags != .passive) try skipInit(br);
const data_len = try br.takeLeb128(u32);
const segment_start = br.seek;
const payload = try wasm.addRelocatableDataPayload(try br.take(data_len));
elem.* = .{
.payload = payload,
.name = .none, // Populated from segment_info
@@ -911,10 +892,10 @@ pub fn parse(
};
}
},
else => pos = section_end,
else => br.seek = section_end,
}
if (pos != section_end) return error.MalformedSection;
}
if (br.seek != section_end) return error.MalformedSection;
} else |_| {}
if (!saw_linking_section) return error.MissingLinkingSection;
const cpu = comp.root_mod.resolved_target.result.cpu;
@@ -1422,27 +1403,27 @@ pub fn parse(
/// Based on the "features" custom section, parses it into a list of
/// features that tell the linker what features were enabled and may be mandatory
/// to be able to link.
fn parseFeatures(
wasm: *Wasm,
bytes: []const u8,
start_pos: usize,
path: Path,
) error{ OutOfMemory, LinkFailure }!struct { Wasm.Feature.Set, usize } {
fn parseFeatures(wasm: *Wasm, reader: *Reader, path: Path) error{ OutOfMemory, LinkFailure }!Wasm.Feature.Set {
const gpa = wasm.base.comp.gpa;
const diags = &wasm.base.comp.link_diags;
const features_len, var pos = readLeb(u32, bytes, start_pos);
const features_len = reader.takeLeb128(u32) catch |err|
return diags.failParse(path, "invalid features length: {t}", .{err});
// This temporary allocation could be avoided by using the string_bytes buffer as a scratch space.
const feature_buffer = try gpa.alloc(Wasm.Feature, features_len);
defer gpa.free(feature_buffer);
for (feature_buffer) |*feature| {
const prefix: Wasm.Feature.Prefix = switch (bytes[pos]) {
const prefix: Wasm.Feature.Prefix = switch (reader.takeByte() catch |err| {
return diags.failParse(path, "invalid feature prefix: {t}", .{err});
}) {
'-' => .@"-",
'+' => .@"+",
'=' => .@"=",
else => |b| return diags.failParse(path, "invalid feature prefix: 0x{x}", .{b}),
};
pos += 1;
const name, pos = readBytes(bytes, pos);
const name_len = reader.takeLeb128(u32) catch |err|
return diags.failParse(path, "bad feature name length: {t}", .{err});
const name = reader.take(name_len) catch |err|
return diags.failParse(path, "bad feature name: {t}", .{err});
const tag = std.meta.stringToEnum(Wasm.Feature.Tag, name) orelse {
return diags.failParse(path, "unrecognized wasm feature in object: {s}", .{name});
};
@@ -1453,68 +1434,34 @@ fn parseFeatures(
}
std.mem.sortUnstable(Wasm.Feature, feature_buffer, {}, Wasm.Feature.lessThan);
return .fromString(try wasm.internString(@ptrCast(feature_buffer)));
}
fn readLimits(reader: *Reader) !std.wasm.Limits {
const flags: std.wasm.Limits.Flags = @bitCast(try reader.takeByte());
const min = try reader.takeLeb128(u32);
const max = if (flags.has_max) try reader.takeLeb128(u32) else 0;
return .{
.fromString(try wasm.internString(@ptrCast(feature_buffer))),
pos,
};
}
fn readLeb(comptime T: type, bytes: []const u8, pos: usize) struct { T, usize } {
var fbr = std.io.fixedBufferStream(bytes[pos..]);
return .{
switch (@typeInfo(T).int.signedness) {
.signed => std.leb.readIleb128(T, fbr.reader()) catch unreachable,
.unsigned => std.leb.readUleb128(T, fbr.reader()) catch unreachable,
},
pos + fbr.pos,
};
}
fn readBytes(bytes: []const u8, start_pos: usize) struct { []const u8, usize } {
const len, const pos = readLeb(u32, bytes, start_pos);
return .{
bytes[pos..][0..len],
pos + len,
};
}
fn readEnum(comptime T: type, bytes: []const u8, pos: usize) struct { T, usize } {
const Tag = @typeInfo(T).@"enum".tag_type;
const int, const new_pos = readLeb(Tag, bytes, pos);
return .{ @enumFromInt(int), new_pos };
}
fn readLimits(bytes: []const u8, start_pos: usize) struct { std.wasm.Limits, usize } {
const flags: std.wasm.Limits.Flags = @bitCast(bytes[start_pos]);
const min, const max_pos = readLeb(u32, bytes, start_pos + 1);
const max, const end_pos = if (flags.has_max) readLeb(u32, bytes, max_pos) else .{ 0, max_pos };
return .{ .{
.flags = flags,
.min = min,
.max = max,
}, end_pos };
}
fn readInit(wasm: *Wasm, bytes: []const u8, pos: usize) !struct { Wasm.Expr, usize } {
const end_pos = try skipInit(bytes, pos); // one after the end opcode
return .{ try wasm.addExpr(bytes[pos..end_pos]), end_pos };
}
pub fn exprEndPos(bytes: []const u8, pos: usize) error{InvalidInitOpcode}!usize {
const opcode = bytes[pos];
return switch (@as(std.wasm.Opcode, @enumFromInt(opcode))) {
.i32_const => readLeb(i32, bytes, pos + 1)[1],
.i64_const => readLeb(i64, bytes, pos + 1)[1],
.f32_const => pos + 5,
.f64_const => pos + 9,
.global_get => readLeb(u32, bytes, pos + 1)[1],
else => return error.InvalidInitOpcode,
};
}
fn skipInit(bytes: []const u8, pos: usize) !usize {
const end_pos = try exprEndPos(bytes, pos);
const op, const final_pos = readEnum(std.wasm.Opcode, bytes, end_pos);
if (op != .end) return error.InitExprMissingEnd;
return final_pos;
fn readInit(wasm: *Wasm, reader: *Reader) !Wasm.Expr {
const start = reader.seek;
try skipInit(reader); // one after the end opcode
return wasm.addExpr(reader.buffered()[start..reader.seek]);
}
pub fn skipInit(reader: *Reader) !void {
switch (try reader.takeEnumNonexhaustive(std.wasm.Opcode, .little)) {
.i32_const => _ = try reader.takeLeb128(i32),
.i64_const => _ = try reader.takeLeb128(i64),
.f32_const => try reader.discardAll(5),
.f64_const => try reader.discardAll(9),
.global_get => _ = try reader.takeLeb128(u32),
else => return error.InvalidInitOpcode,
}
if (try reader.takeEnum(std.wasm.Opcode, .little) != .end) return error.InitExprMissingEnd;
}