mirror of
https://codeberg.org/ziglang/zig.git
synced 2026-04-26 13:01:34 +03:00
stage2-wasm: start big int support
This commit is contained in:
committed by
Andrew Kelley
parent
3e0f55fcc7
commit
557caecaaa
@@ -279,6 +279,8 @@ comptime {
|
||||
_ = @import("compiler_rt/divmodei4.zig");
|
||||
_ = @import("compiler_rt/udivmodei4.zig");
|
||||
|
||||
_ = @import("compiler_rt/limb64.zig");
|
||||
|
||||
// extra
|
||||
_ = @import("compiler_rt/os_version_check.zig");
|
||||
_ = @import("compiler_rt/emutls.zig");
|
||||
|
||||
@@ -0,0 +1,266 @@
|
||||
const std = @import("std");
|
||||
const testing = std.testing;
|
||||
const assert = std.debug.assert;
|
||||
const maxInt = std.math.maxInt;
|
||||
const minInt = std.math.minInt;
|
||||
const divCeil = std.math.divCeil;
|
||||
|
||||
const builtin = @import("builtin");
|
||||
const compiler_rt = @import("../compiler_rt.zig");
|
||||
|
||||
const endian = builtin.cpu.arch.endian();
|
||||
|
||||
/// Reads limb `i`, counting from the least significant limb, regardless of the
/// byte order the host uses to store the limb array.
inline fn limbGet(limbs: []const u64, i: usize) u64 {
    const idx = switch (endian) {
        .little => i,
        .big => limbs.len - 1 - i,
    };
    return limbs[idx];
}
|
||||
|
||||
/// Writes limb `i`, counting from the least significant limb, regardless of the
/// byte order the host uses to store the limb array.
inline fn limbSet(limbs: []u64, i: usize, value: u64) void {
    const idx = switch (endian) {
        .little => i,
        .big => limbs.len - 1 - i,
    };
    limbs[idx] = value;
}
|
||||
|
||||
/// Number of 64-bit limbs required to represent an integer of `bits` bits.
fn limbCount(bits: u16) u16 {
    // divCeil only errors on division by zero / overflow, neither of which can
    // happen with a fixed divisor of 64.
    return std.math.divCeil(u16, bits, 64) catch unreachable;
}
|
||||
|
||||
/// Fixed-size limb-array type capable of holding an integer of type `T`.
fn Limbs(T: type) type {
    const bits = @typeInfo(T).int.bits;
    // Array length is a type-position expression, so limbCount is evaluated at
    // comptime here.
    return [limbCount(bits)]u64;
}
|
||||
|
||||
/// Reinterprets a native integer as a limb array. The value is first widened to
/// a whole number of limbs (sign-extending when `@TypeOf(v)` is signed), then
/// bit-cast, so the padding bits of the top limb match what the intrinsics
/// below expect.
fn asLimbs(v: anytype) Limbs(@TypeOf(v)) {
    const info = @typeInfo(@TypeOf(v)).int;
    const limb_cnt = comptime limbCount(info.bits);
    const Extended = @Int(info.signedness, limb_cnt * 64);
    return @bitCast(@as(Extended, v));
}
|
||||
|
||||
/// Wraps `limb` to the top `bits % 64` bits of a big int: zero-extends the
/// value's padding bits when unsigned, sign-extends them when signed. Only
/// valid when the total width is not a multiple of 64 (asserted).
fn limbWrap(limb: u64, is_signed: bool, bits: u16) u64 {
    assert(bits % 64 != 0);
    const pad_bits: u6 = @intCast(64 - bits % 64);
    if (is_signed) {
        // Arithmetic shift pair replicates the value's top bit into the padding.
        const shifted = @as(i64, @bitCast(limb)) << pad_bits;
        return @bitCast(shifted >> pad_bits);
    }
    // Logical shift pair clears the padding bits.
    return (limb << pad_bits) >> pad_bits;
}
|
||||
|
||||
comptime {
    // Register the intrinsic under its C-ABI name so backends can emit calls to it.
    @export(&__addo_limb64, .{ .name = "__addo_limb64", .linkage = compiler_rt.linkage, .visibility = compiler_rt.visibility });
}
|
||||
|
||||
/// Big-int addition with overflow detection over arrays of 64-bit limbs.
/// `out_ptr`, `a_ptr` and `b_ptr` each point at `limbCount(bits)` limbs.
/// Stores the sum, wrapped to `bits` bits, into `out_ptr` and returns true when
/// the addition overflowed `bits` bits (signed or unsigned per `is_signed`).
fn __addo_limb64(out_ptr: [*]u64, a_ptr: [*]const u64, b_ptr: [*]const u64, is_signed: bool, bits: u16) callconv(.c) bool {
    const limb_cnt = limbCount(bits);
    const out = out_ptr[0..limb_cnt];
    const a = a_ptr[0..limb_cnt];
    const b = b_ptr[0..limb_cnt];

    // Ripple-carry addition over every limb except the most significant one.
    var carry: u1 = 0;
    var i: usize = 0;
    while (i < limb_cnt - 1) : (i += 1) {
        const s1 = @addWithOverflow(limbGet(a, i), limbGet(b, i));
        const s2 = @addWithOverflow(s1[0], carry);
        // At most one of the two adds can carry, so OR combines them losslessly.
        carry = s1[1] | s2[1];
        limbSet(out, i, s2[0]);
    }

    // Most significant limb. For signed integers it is added as i64 so that the
    // overflow flag reflects *signed* overflow of the top limb.
    const limb: u64 = b: {
        if (!is_signed) {
            const s1 = @addWithOverflow(limbGet(a, i), limbGet(b, i));
            const s2 = @addWithOverflow(s1[0], carry);
            carry = s1[1] | s2[1];
            break :b s2[0];
        } else {
            const as: i64 = @bitCast(limbGet(a, i));
            const bs: i64 = @bitCast(limbGet(b, i));
            const s1 = @addWithOverflow(as, bs);
            const s2 = @addWithOverflow(s1[0], carry);
            carry = s1[1] | s2[1];
            break :b @bitCast(s2[0]);
        }
    };

    if (bits % 64 == 0) {
        // Width is an exact multiple of 64: the final carry flag is the answer.
        limbSet(out, i, limb);
        return carry != 0;
    } else {
        // Operands are expected to already be wrapped to `bits`, which leaves
        // headroom in the top limb: the assert enforces that the top-limb add
        // cannot carry here. Overflow instead shows up as the wrapped result
        // differing from the raw one.
        assert(carry == 0);
        const wrapped_limb = limbWrap(limb, is_signed, bits);
        limbSet(out, i, wrapped_limb);
        return wrapped_limb != limb;
    }
}
|
||||
|
||||
/// Drives `__addo_limb64` with operands of native int type `T` and checks both
/// the wrapped sum and the overflow flag against `expected`.
fn test__addo_limb64(comptime T: type, a: T, b: T, expected: struct { T, bool }) !void {
    const info = @typeInfo(T).int;
    const signed = info.signedness == .signed;

    var lhs_limbs = asLimbs(a);
    var rhs_limbs = asLimbs(b);
    var result: Limbs(T) = undefined;
    const overflowed = __addo_limb64(&result, &lhs_limbs, &rhs_limbs, signed, info.bits);

    try testing.expectEqual(asLimbs(expected[0]), result);
    try testing.expectEqual(expected[1], overflowed);
}
|
||||
|
||||
test __addo_limb64 {
    // Unsigned: exact limb width (64), one bit past a limb (65), multi-limb (255).
    try test__addo_limb64(u64, 1, 2, .{ 3, false });
    try test__addo_limb64(u64, maxInt(u64), 2, .{ 1, true });
    try test__addo_limb64(u65, maxInt(u65), 2, .{ 1, true });
    try test__addo_limb64(u255, 1, 2, .{ 3, false });

    // Signed equivalents, including the maxInt -> minInt wrap on overflow.
    try test__addo_limb64(i64, 1, 2, .{ 3, false });
    try test__addo_limb64(i64, maxInt(i64), 1, .{ minInt(i64), true });
    try test__addo_limb64(i65, maxInt(i65), 1, .{ minInt(i65), true });
    try test__addo_limb64(i255, -3, 2, .{ -1, false });
}
|
||||
|
||||
comptime {
    // Register the intrinsic under its C-ABI name so backends can emit calls to it.
    @export(&__subo_limb64, .{ .name = "__subo_limb64", .linkage = compiler_rt.linkage, .visibility = compiler_rt.visibility });
}
|
||||
|
||||
/// Big-int subtraction with overflow detection over arrays of 64-bit limbs.
/// `out_ptr`, `a_ptr` and `b_ptr` each point at `limbCount(bits)` limbs.
/// Stores `a - b`, wrapped to `bits` bits, into `out_ptr` and returns true when
/// the subtraction overflowed `bits` bits (signed or unsigned per `is_signed`).
fn __subo_limb64(out_ptr: [*]u64, a_ptr: [*]const u64, b_ptr: [*]const u64, is_signed: bool, bits: u16) callconv(.c) bool {
    const limb_cnt = limbCount(bits);
    const out = out_ptr[0..limb_cnt];
    const a = a_ptr[0..limb_cnt];
    const b = b_ptr[0..limb_cnt];

    // Ripple-borrow subtraction over every limb except the most significant one.
    var borrow: u1 = 0;
    var i: usize = 0;
    while (i < limb_cnt - 1) : (i += 1) {
        const s1 = @subWithOverflow(limbGet(a, i), limbGet(b, i));
        const s2 = @subWithOverflow(s1[0], borrow);
        // At most one of the two subtractions can borrow, so OR is lossless.
        borrow = s1[1] | s2[1];
        limbSet(out, i, s2[0]);
    }

    // Most significant limb. For signed integers it is subtracted as i64 so
    // that the overflow flag reflects *signed* overflow of the top limb.
    const limb: u64 = b: {
        if (!is_signed) {
            const s1 = @subWithOverflow(limbGet(a, i), limbGet(b, i));
            const s2 = @subWithOverflow(s1[0], borrow);
            borrow = s1[1] | s2[1];
            break :b s2[0];
        } else {
            const as: i64 = @bitCast(limbGet(a, i));
            const bs: i64 = @bitCast(limbGet(b, i));
            const s1 = @subWithOverflow(as, bs);
            const s2 = @subWithOverflow(s1[0], borrow);
            borrow = s1[1] | s2[1];
            break :b @bitCast(s2[0]);
        }
    };

    if (bits % 64 == 0) {
        // Width is an exact multiple of 64: the final borrow flag is the answer.
        limbSet(out, i, limb);
        return borrow != 0;
    } else {
        // Unlike addition, the top-limb subtraction can legitimately borrow here
        // (e.g. unsigned 0 - 1 underflows through the top limb), so overflow is
        // signalled by either the borrow or the wrap changing the limb.
        const wrapped_limb = limbWrap(limb, is_signed, bits);
        limbSet(out, i, wrapped_limb);
        return borrow != 0 or wrapped_limb != limb;
    }
}
|
||||
|
||||
/// Drives `__subo_limb64` with operands of native int type `T` and checks both
/// the wrapped difference and the overflow flag against `expected`.
fn test__subo_limb64(comptime T: type, a: T, b: T, expected: struct { T, bool }) !void {
    const info = @typeInfo(T).int;
    const signed = info.signedness == .signed;

    var lhs_limbs = asLimbs(a);
    var rhs_limbs = asLimbs(b);
    var result: Limbs(T) = undefined;
    const overflowed = __subo_limb64(&result, &lhs_limbs, &rhs_limbs, signed, info.bits);

    try testing.expectEqual(asLimbs(expected[0]), result);
    try testing.expectEqual(expected[1], overflowed);
}
|
||||
|
||||
test __subo_limb64 {
    // Unsigned: exact limb width (64), one bit past a limb (65), multi-limb (255).
    try test__subo_limb64(u64, 3, 2, .{ 1, false });
    try test__subo_limb64(u64, 0, 1, .{ maxInt(u64), true });
    try test__subo_limb64(u65, 0, 1, .{ maxInt(u65), true });
    try test__subo_limb64(u255, 3, 2, .{ 1, false });

    // Signed equivalents, including the minInt -> maxInt wrap on overflow.
    try test__subo_limb64(i64, 1, 2, .{ -1, false });
    try test__subo_limb64(i64, minInt(i64), 1, .{ maxInt(i64), true });
    try test__subo_limb64(i65, minInt(i65), 1, .{ maxInt(i65), true });
    try test__subo_limb64(i255, -1, 2, .{ -3, false });
}
|
||||
|
||||
comptime {
    // Register the intrinsic under its C-ABI name so backends can emit calls to it.
    @export(&__cmp_limb64, .{ .name = "__cmp_limb64", .linkage = compiler_rt.linkage, .visibility = compiler_rt.visibility });
}
|
||||
|
||||
/// Three-way comparison of two big ints stored as limb arrays of
/// `limbCount(bits)` limbs each.
/// Returns -1 when a < b, 0 when a == b, 1 when a > b.
fn __cmp_limb64(a_ptr: [*]const u64, b_ptr: [*]const u64, is_signed: bool, bits: u16) callconv(.c) i8 {
    const limb_cnt = limbCount(bits);
    const a = a_ptr[0..limb_cnt];
    const b = b_ptr[0..limb_cnt];

    var remaining: usize = limb_cnt;

    // The most significant limb carries the sign, so compare it as i64 first
    // when the integers are signed; all lower limbs compare unsigned.
    if (is_signed) {
        const top_a: i64 = @bitCast(limbGet(a, limb_cnt - 1));
        const top_b: i64 = @bitCast(limbGet(b, limb_cnt - 1));
        if (top_a != top_b) return if (top_a < top_b) -1 else 1;
        remaining -= 1;
    }

    // Walk the remaining limbs from most to least significant; the first
    // difference decides the ordering.
    while (remaining > 0) {
        remaining -= 1;
        const av = limbGet(a, remaining);
        const bv = limbGet(b, remaining);
        if (av != bv) return if (av < bv) -1 else 1;
    }

    return 0;
}
|
||||
|
||||
/// Compares `a` and `b` through `__cmp_limb64` and checks the three-way result.
fn test__cmp_limb64(comptime T: type, a: T, b: T, expected: i8) !void {
    const info = @typeInfo(T).int;
    const signed = info.signedness == .signed;

    var lhs_limbs = asLimbs(a);
    var rhs_limbs = asLimbs(b);
    const actual = __cmp_limb64(&lhs_limbs, &rhs_limbs, signed, info.bits);
    try testing.expectEqual(expected, actual);
}
|
||||
|
||||
test __cmp_limb64 {
    // Unsigned, single limb.
    try test__cmp_limb64(u64, 1, 2, -1);
    try test__cmp_limb64(u64, 2, 2, 0);
    try test__cmp_limb64(u64, 3, 2, 1);

    // Unsigned, one bit past a limb boundary.
    try test__cmp_limb64(u65, 1, 2, -1);
    try test__cmp_limb64(u65, maxInt(u65), maxInt(u65), 0);
    try test__cmp_limb64(u65, maxInt(u65), maxInt(u65) - 1, 1);

    // Unsigned, multi-limb.
    try test__cmp_limb64(u255, 1, 2, -1);
    try test__cmp_limb64(u255, 7, 7, 0);
    try test__cmp_limb64(u255, maxInt(u255), maxInt(u255) - 1, 1);

    // Signed, single limb.
    try test__cmp_limb64(i64, -1, 0, -1);
    try test__cmp_limb64(i64, 0, 0, 0);
    try test__cmp_limb64(i64, 1, 0, 1);

    // Signed, one bit past a limb boundary (sign bit lives in the top limb).
    try test__cmp_limb64(i65, minInt(i65), maxInt(i65), -1);
    try test__cmp_limb64(i65, -1, -1, 0);
    try test__cmp_limb64(i65, maxInt(i65), minInt(i65), 1);

    // Signed, multi-limb, mixed signs.
    try test__cmp_limb64(i255, -3, 2, -1);
    try test__cmp_limb64(i255, -5, -5, 0);
    try test__cmp_limb64(i255, 2, -3, 1);
}
|
||||
+107
-25
@@ -498,6 +498,10 @@ fn addAtomicTag(cg: *CodeGen, tag: std.wasm.AtomicsOpcode) error{OutOfMemory}!vo
|
||||
try cg.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
|
||||
}
|
||||
|
||||
/// Emits a MIR `call_intrinsic` instruction that calls the given compiler-rt
/// intrinsic.
fn addCallIntrinsic(cg: *CodeGen, intrinsic: Mir.Intrinsic) error{OutOfMemory}!void {
    try cg.addInst(.{ .tag = .call_intrinsic, .data = .{ .intrinsic = intrinsic } });
}
|
||||
|
||||
/// Appends entries to `mir_extra` based on the type of `extra`.
|
||||
/// Returns the index into `mir_extra`
|
||||
fn addExtra(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
|
||||
@@ -1010,6 +1014,24 @@ fn allocStack(cg: *CodeGen, ty: Type) !WValue {
|
||||
return .{ .stack_offset = .{ .value = offset, .references = 1 } };
|
||||
}
|
||||
|
||||
/// Reserves a slot in the stack frame sized and aligned for an integer of
/// `int_ty.bits` bits and returns it as a stack-offset `WValue`.
/// Fails when the integer's ABI size does not fit in u32.
fn allocInt(cg: *CodeGen, int_ty: IntType) !WValue {
    if (cg.initial_stack_value == .none) {
        try cg.initializeStack();
    }

    const abi_size = std.math.cast(u32, std.zig.target.intByteSize(cg.target, int_ty.bits)) orelse {
        return cg.fail("Integer ABI size exceeds max stack size", .{});
    };
    const abi_align: Alignment = .fromByteUnits(std.zig.target.intAlignment(cg.target, int_ty.bits));

    // The frame must be at least as aligned as its most-aligned slot.
    cg.stack_alignment = cg.stack_alignment.max(abi_align);

    const offset: u32 = @intCast(abi_align.forward(cg.stack_size));
    // Bump the frame size on exit, after `offset` has been captured for the
    // returned value.
    defer cg.stack_size = offset + abi_size;

    return .{ .stack_offset = .{ .value = offset, .references = 1 } };
}
|
||||
|
||||
/// From a given AIR instruction generates a pointer to the stack where
|
||||
/// the value of its type will live.
|
||||
/// This is different from allocStack where this will use the pointer's alignment
|
||||
@@ -2393,7 +2415,18 @@ fn intAdd(cg: *CodeGen, ty: IntType, lhs: WValue, rhs: WValue) InnerError!WValue
|
||||
try cg.store(result, tmp_op, Type.u64, 8);
|
||||
return result;
|
||||
},
|
||||
else => return cg.fail("TODO: Support intAdd for integer bitsize: {d}", .{ty.bits}),
|
||||
else => {
|
||||
const result = try cg.allocInt(ty);
|
||||
|
||||
try cg.lowerToStack(result);
|
||||
try cg.lowerToStack(lhs);
|
||||
try cg.lowerToStack(rhs);
|
||||
try cg.addImm32(@intFromBool(ty.is_signed));
|
||||
try cg.addImm32(ty.bits);
|
||||
try cg.addCallIntrinsic(.__addo_limb64);
|
||||
try cg.addTag(.drop);
|
||||
return result;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2435,7 +2468,19 @@ fn intSub(cg: *CodeGen, ty: IntType, lhs: WValue, rhs: WValue) InnerError!WValue
|
||||
try cg.store(result, tmp_op, Type.u64, 8);
|
||||
return result;
|
||||
},
|
||||
else => return cg.fail("TODO: Support intSub for integer bitsize: {d}", .{ty.bits}),
|
||||
else => {
|
||||
const result = try cg.allocInt(ty);
|
||||
|
||||
try cg.lowerToStack(result);
|
||||
try cg.lowerToStack(lhs);
|
||||
try cg.lowerToStack(rhs);
|
||||
try cg.addImm32(@intFromBool(ty.is_signed));
|
||||
try cg.addImm32(ty.bits);
|
||||
try cg.addCallIntrinsic(.__subo_limb64);
|
||||
try cg.addTag(.drop);
|
||||
|
||||
return result;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3434,45 +3479,66 @@ const OverflowResult = struct {
|
||||
ov: WValue,
|
||||
};
|
||||
|
||||
fn intAddOverflow(cg: *CodeGen, int_ty: IntType, lhs: WValue, rhs: WValue) InnerError!OverflowResult {
|
||||
switch (int_ty.bits) {
|
||||
fn intAddOverflow(cg: *CodeGen, ty: IntType, lhs: WValue, rhs: WValue) InnerError!OverflowResult {
|
||||
switch (ty.bits) {
|
||||
0 => unreachable,
|
||||
1...128 => {
|
||||
const raw_result = try cg.intAdd(int_ty, lhs, rhs);
|
||||
const op_result = try cg.intWrap(int_ty, raw_result);
|
||||
const op_tmp = try cg.toLocalInt(op_result, int_ty);
|
||||
const raw_result = try cg.intAdd(ty, lhs, rhs);
|
||||
const op_result = try cg.intWrap(ty, raw_result);
|
||||
const op_tmp = try cg.toLocalInt(op_result, ty);
|
||||
|
||||
const overflow_bit = if (int_ty.is_signed) blk: {
|
||||
const zero = try cg.intZeroValue(int_ty);
|
||||
const rhs_is_neg = try cg.intCmp(int_ty, .lt, rhs, zero);
|
||||
const overflow_cmp = try cg.intCmp(int_ty, .lt, op_tmp, lhs);
|
||||
const overflow_bit = if (ty.is_signed) blk: {
|
||||
const zero = try cg.intZeroValue(ty);
|
||||
const rhs_is_neg = try cg.intCmp(ty, .lt, rhs, zero);
|
||||
const overflow_cmp = try cg.intCmp(ty, .lt, op_tmp, lhs);
|
||||
break :blk try cg.intCmp(.u32, .neq, rhs_is_neg, overflow_cmp);
|
||||
} else try cg.intCmp(int_ty, .lt, op_tmp, lhs);
|
||||
} else try cg.intCmp(ty, .lt, op_tmp, lhs);
|
||||
|
||||
return .{ .result = op_tmp, .ov = overflow_bit };
|
||||
},
|
||||
else => return cg.fail("TODO: Support intAddOverflow for integer bitsize: {d}", .{int_ty.bits}),
|
||||
else => {
|
||||
const result = try cg.allocInt(ty);
|
||||
|
||||
try cg.lowerToStack(result);
|
||||
try cg.lowerToStack(lhs);
|
||||
try cg.lowerToStack(rhs);
|
||||
try cg.addImm32(@intFromBool(ty.is_signed));
|
||||
try cg.addImm32(ty.bits);
|
||||
try cg.addCallIntrinsic(.__addo_limb64);
|
||||
|
||||
return .{ .result = result, .ov = .stack };
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn intSubOverflow(cg: *CodeGen, int_ty: IntType, lhs: WValue, rhs: WValue) InnerError!OverflowResult {
|
||||
switch (int_ty.bits) {
|
||||
fn intSubOverflow(cg: *CodeGen, ty: IntType, lhs: WValue, rhs: WValue) InnerError!OverflowResult {
|
||||
switch (ty.bits) {
|
||||
0 => unreachable,
|
||||
1...128 => {
|
||||
const raw_result = try cg.intSub(int_ty, lhs, rhs);
|
||||
const op_result = try cg.intWrap(int_ty, raw_result);
|
||||
const op_tmp = try cg.toLocalInt(op_result, int_ty);
|
||||
const raw_result = try cg.intSub(ty, lhs, rhs);
|
||||
const op_result = try cg.intWrap(ty, raw_result);
|
||||
const op_tmp = try cg.toLocalInt(op_result, ty);
|
||||
|
||||
const overflow_bit = if (int_ty.is_signed) blk: {
|
||||
const zero = try cg.intZeroValue(int_ty);
|
||||
const rhs_is_neg = try cg.intCmp(int_ty, .lt, rhs, zero);
|
||||
const overflow_cmp = try cg.intCmp(int_ty, .gt, op_tmp, lhs);
|
||||
const overflow_bit = if (ty.is_signed) blk: {
|
||||
const zero = try cg.intZeroValue(ty);
|
||||
const rhs_is_neg = try cg.intCmp(ty, .lt, rhs, zero);
|
||||
const overflow_cmp = try cg.intCmp(ty, .gt, op_tmp, lhs);
|
||||
break :blk try cg.intCmp(.u32, .neq, rhs_is_neg, overflow_cmp);
|
||||
} else try cg.intCmp(int_ty, .gt, op_tmp, lhs);
|
||||
} else try cg.intCmp(ty, .gt, op_tmp, lhs);
|
||||
|
||||
return .{ .result = op_tmp, .ov = overflow_bit };
|
||||
},
|
||||
else => return cg.fail("TODO: Support intSubOverflow for integer bitsize: {d}", .{int_ty.bits}),
|
||||
else => {
|
||||
const result = try cg.allocInt(ty);
|
||||
|
||||
try cg.lowerToStack(result);
|
||||
try cg.lowerToStack(lhs);
|
||||
try cg.lowerToStack(rhs);
|
||||
try cg.addImm32(@intFromBool(ty.is_signed));
|
||||
try cg.addImm32(ty.bits);
|
||||
try cg.addCallIntrinsic(.__subo_limb64);
|
||||
return .{ .result = result, .ov = .stack };
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4764,7 +4830,23 @@ fn intCmp(cg: *CodeGen, ty: IntType, op: std.math.CompareOperator, lhs: WValue,
|
||||
|
||||
return .stack;
|
||||
},
|
||||
else => return cg.fail("TODO: Support intCmp for integer bitsize: {d}", .{ty.bits}),
|
||||
else => {
|
||||
try cg.lowerToStack(lhs);
|
||||
try cg.lowerToStack(rhs);
|
||||
try cg.addImm32(@intFromBool(ty.is_signed));
|
||||
try cg.addImm32(ty.bits);
|
||||
try cg.addCallIntrinsic(.__cmp_limb64);
|
||||
try cg.addImm32(0);
|
||||
try cg.addTag(switch (op) {
|
||||
.eq => .i32_eq,
|
||||
.neq => .i32_ne,
|
||||
.lt => .i32_lt_s,
|
||||
.lte => .i32_le_s,
|
||||
.gte => .i32_ge_s,
|
||||
.gt => .i32_gt_s,
|
||||
});
|
||||
return .stack;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -352,6 +352,7 @@ pub fn lowerToCode(emit: *Emit) Error!void {
|
||||
.end,
|
||||
.@"return",
|
||||
.@"unreachable",
|
||||
.drop,
|
||||
.select,
|
||||
.i32_eqz,
|
||||
.i32_eq,
|
||||
|
||||
@@ -169,6 +169,10 @@ pub const Inst = struct {
|
||||
call_tag_name,
|
||||
/// Lowers to a `call` instruction, using `intrinsic`.
|
||||
call_intrinsic,
|
||||
/// Pops a value from the stack, and discards it.
|
||||
///
|
||||
/// Uses `tag` (no additional data).
|
||||
drop = 0x1A,
|
||||
/// Pops three values from the stack and pushes
|
||||
/// the first or second value dependent on the third value.
|
||||
/// Uses `tag`
|
||||
@@ -1000,4 +1004,7 @@ pub const Intrinsic = enum(u32) {
|
||||
tanf,
|
||||
tanq,
|
||||
truncq,
|
||||
__addo_limb64,
|
||||
__subo_limb64,
|
||||
__cmp_limb64,
|
||||
};
|
||||
|
||||
@@ -1248,6 +1248,20 @@ test "integer compare <= 128 bits" {
|
||||
}
|
||||
}
|
||||
|
||||
test "integer compare > 128 bits" {
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

    // Exercise big-int comparison at runtime and at comptime across a spread of
    // widths: just past 128 bits, an inexact limb multiple, and exact multiples.
    inline for (.{ u129, u255, u512, u800 }) |T| {
        try testUnsignedCmp(T);
        try comptime testUnsignedCmp(T);
    }
    inline for (.{ i129, i255, i512, i800 }) |T| {
        try testSignedCmp(T);
        try comptime testSignedCmp(T);
    }
}
|
||||
|
||||
test "reference to inferred local variable works as expected" {
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@ test "int comparison elision" {
|
||||
testIntEdges(i4);
|
||||
|
||||
// TODO: support int types > 128 bits wide in other backends
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
|
||||
|
||||
+88
-2
@@ -877,7 +877,7 @@ test "@addWithOverflow" {
|
||||
try testAddWithOverflow(isize, minInt(isize), -6, maxInt(isize) - 5, 1);
|
||||
}
|
||||
|
||||
test "@addWithOverflow > 64 bits" {
|
||||
test "@addWithOverflow <= 128 bits" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
@@ -920,6 +920,50 @@ test "@addWithOverflow > 64 bits" {
|
||||
try testAddWithOverflow(i128, maxInt(i128), maxInt(i128) - 1, -3, 1);
|
||||
}
|
||||
|
||||
test "@addWithOverflow > 128 bits" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_c and builtin.target.abi == .msvc) return error.SkipZigTest;

    // Unsigned, 129 bits: one bit past two limbs.
    try testAddWithOverflow(u129, 4, 105, 109, 0);
    try testAddWithOverflow(u129, 1000, 100, 1100, 0);
    try testAddWithOverflow(u129, 100, maxInt(u129) - 99, 0, 1);
    try testAddWithOverflow(u129, maxInt(u129), maxInt(u129), maxInt(u129) - 1, 1);
    try testAddWithOverflow(u129, maxInt(u129) - 1, maxInt(u129), maxInt(u129) - 2, 1);
    try testAddWithOverflow(u129, maxInt(u129), maxInt(u129) - 1, maxInt(u129) - 2, 1);

    // Unsigned, 400 bits: inexact multi-limb width.
    try testAddWithOverflow(u400, 4, 105, 109, 0);
    try testAddWithOverflow(u400, 1000, 100, 1100, 0);
    try testAddWithOverflow(u400, 100, maxInt(u400) - 99, 0, 1);
    try testAddWithOverflow(u400, maxInt(u400), maxInt(u400), maxInt(u400) - 1, 1);
    try testAddWithOverflow(u400, maxInt(u400) - 1, maxInt(u400), maxInt(u400) - 2, 1);
    try testAddWithOverflow(u400, maxInt(u400), maxInt(u400) - 1, maxInt(u400) - 2, 1);

    // Signed, 129 bits, including both positive and negative overflow.
    try testAddWithOverflow(i129, 4, -105, -101, 0);
    try testAddWithOverflow(i129, 1000, 100, 1100, 0);
    try testAddWithOverflow(i129, minInt(i129), 1, minInt(i129) + 1, 0);
    try testAddWithOverflow(i129, maxInt(i129), minInt(i129), -1, 0);
    try testAddWithOverflow(i129, minInt(i129), maxInt(i129), -1, 0);
    try testAddWithOverflow(i129, maxInt(i129), -2, maxInt(i129) - 2, 0);
    try testAddWithOverflow(i129, maxInt(i129), maxInt(i129), -2, 1);
    try testAddWithOverflow(i129, minInt(i129), minInt(i129), 0, 1);
    try testAddWithOverflow(i129, maxInt(i129) - 1, maxInt(i129), -3, 1);
    try testAddWithOverflow(i129, maxInt(i129), maxInt(i129) - 1, -3, 1);

    // Signed, 400 bits.
    try testAddWithOverflow(i400, 4, -105, -101, 0);
    try testAddWithOverflow(i400, 1000, 100, 1100, 0);
    try testAddWithOverflow(i400, minInt(i400), 1, minInt(i400) + 1, 0);
    try testAddWithOverflow(i400, maxInt(i400), minInt(i400), -1, 0);
    try testAddWithOverflow(i400, minInt(i400), maxInt(i400), -1, 0);
    try testAddWithOverflow(i400, maxInt(i400), -2, maxInt(i400) - 2, 0);
    try testAddWithOverflow(i400, maxInt(i400), maxInt(i400), -2, 1);
    try testAddWithOverflow(i400, minInt(i400), minInt(i400), 0, 1);
    try testAddWithOverflow(i400, maxInt(i400) - 1, maxInt(i400), -3, 1);
    try testAddWithOverflow(i400, maxInt(i400), maxInt(i400) - 1, -3, 1);
}
|
||||
|
||||
test "small int addition" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
@@ -1119,7 +1163,7 @@ test "@subWithOverflow" {
|
||||
try testSubWithOverflow(isize, minInt(isize), 6, maxInt(isize) - 5, 1);
|
||||
}
|
||||
|
||||
test "@subWithOverflow > 64 bits" {
|
||||
test "@subWithOverflow <= 128 bits" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
@@ -1160,6 +1204,48 @@ test "@subWithOverflow > 64 bits" {
|
||||
try testSubWithOverflow(i128, maxInt(i128), -2, minInt(i128) + 1, 1);
|
||||
}
|
||||
|
||||
test "@subWithOverflow > 128 bits" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_c and builtin.target.abi == .msvc) return error.SkipZigTest;

    // Unsigned, 129 bits: one bit past two limbs.
    try testSubWithOverflow(u129, 4, 105, maxInt(u129) - 100, 1);
    try testSubWithOverflow(u129, 1000, 100, 900, 0);
    try testSubWithOverflow(u129, maxInt(u129), maxInt(u129), 0, 0);
    try testSubWithOverflow(u129, maxInt(u129) - 1, maxInt(u129), maxInt(u129), 1);
    try testSubWithOverflow(u129, maxInt(u129), maxInt(u129) - 1, 1, 0);

    // Unsigned, 400 bits: inexact multi-limb width.
    try testSubWithOverflow(u400, 4, 105, maxInt(u400) - 100, 1);
    try testSubWithOverflow(u400, 1000, 100, 900, 0);
    try testSubWithOverflow(u400, maxInt(u400), maxInt(u400), 0, 0);
    try testSubWithOverflow(u400, maxInt(u400) - 1, maxInt(u400), maxInt(u400), 1);
    try testSubWithOverflow(u400, maxInt(u400), maxInt(u400) - 1, 1, 0);

    // Signed, 129 bits, including both positive and negative overflow.
    try testSubWithOverflow(i129, 4, 105, -101, 0);
    try testSubWithOverflow(i129, 1000, 100, 900, 0);
    try testSubWithOverflow(i129, maxInt(i129), maxInt(i129), 0, 0);
    try testSubWithOverflow(i129, minInt(i129), minInt(i129), 0, 0);
    try testSubWithOverflow(i129, maxInt(i129) - 1, maxInt(i129), -1, 0);
    try testSubWithOverflow(i129, maxInt(i129), maxInt(i129) - 1, 1, 0);
    try testSubWithOverflow(i129, minInt(i129), 1, maxInt(i129), 1);
    try testSubWithOverflow(i129, maxInt(i129), minInt(i129), -1, 1);
    try testSubWithOverflow(i129, minInt(i129), maxInt(i129), 1, 1);
    try testSubWithOverflow(i129, maxInt(i129), -2, minInt(i129) + 1, 1);

    // Signed, 400 bits.
    try testSubWithOverflow(i400, 4, 105, -101, 0);
    try testSubWithOverflow(i400, 1000, 100, 900, 0);
    try testSubWithOverflow(i400, maxInt(i400), maxInt(i400), 0, 0);
    try testSubWithOverflow(i400, minInt(i400), minInt(i400), 0, 0);
    try testSubWithOverflow(i400, maxInt(i400) - 1, maxInt(i400), -1, 0);
    try testSubWithOverflow(i400, maxInt(i400), maxInt(i400) - 1, 1, 0);
    try testSubWithOverflow(i400, minInt(i400), 1, maxInt(i400), 1);
    try testSubWithOverflow(i400, maxInt(i400), minInt(i400), -1, 1);
    try testSubWithOverflow(i400, minInt(i400), maxInt(i400), 1, 1);
    try testSubWithOverflow(i400, maxInt(i400), -2, minInt(i400) + 1, 1);
}
|
||||
|
||||
fn testShlWithOverflow(comptime T: type, a: T, b: math.Log2Int(T), shl: T, bit: u1) !void {
|
||||
const ov = @shlWithOverflow(a, b);
|
||||
try expect(ov[0] == shl);
|
||||
|
||||
Reference in New Issue
Block a user