stage2-wasm: vector, std tests

Pavel Verigo
2026-04-20 20:49:35 +02:00
committed by Andrew Kelley
parent 36faf76fe1
commit 22945fbbdc
30 changed files with 554 additions and 460 deletions
+9 -4
@@ -1503,7 +1503,7 @@ fn scalarizeBitcastBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Error!?
l,
.store,
index_ptr,
.fromValue(try pt.intValue(.usize, operand_ty.arrayLen(zcu))),
.fromValue(try pt.intValue(.usize, operand_ty.arrayLen(zcu) - 1)),
);
_ = uint_block.addBinOp(l, .store, result_ptr, .fromValue(try pt.intValue(uint_ty, 0)));
@@ -1811,6 +1811,7 @@ fn scalarizeReduceBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, optimize
.Or, .Xor, .Add => switch (scalar_ty.zigTypeTag(zcu)) {
.int => try pt.intValue(scalar_ty, 0),
.float => try pt.floatValue(scalar_ty, 0.0),
.bool => .false,
else => unreachable,
},
// identity for multiplication is 1
@@ -1820,9 +1821,13 @@ fn scalarizeReduceBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, optimize
else => unreachable,
},
// identity for AND is all 1 bits
.And => switch (scalar_ty.intInfo(zcu).signedness) {
.unsigned => try scalar_ty.maxIntScalar(pt, scalar_ty),
.signed => try pt.intValue(scalar_ty, -1),
.And => switch (scalar_ty.zigTypeTag(zcu)) {
.int => switch (scalar_ty.intInfo(zcu).signedness) {
.unsigned => try scalar_ty.maxIntScalar(pt, scalar_ty),
.signed => try pt.intValue(scalar_ty, -1),
},
.bool => .true,
else => unreachable,
},
// identity for @min is maximum value
.Min => switch (scalar_ty.zigTypeTag(zcu)) {
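The identity values chosen above are what the scalarized @reduce loop folds from, so an empty or partial accumulation stays correct. A minimal sanity check of those identities from user code (a sketch, not part of this commit):

const std = @import("std");

test "reduce identities" {
    const v: @Vector(3, bool) = .{ true, false, true };
    // .Or folds from `false`, .And folds from `true`, per the table above
    try std.testing.expect(@reduce(.Or, v));
    try std.testing.expect(!@reduce(.And, v));
    // for signed integers the And identity is -1 (all one bits)
    const ones: @Vector(2, i8) = .{ -1, -1 };
    try std.testing.expectEqual(@as(i8, -1), @reduce(.And, ones));
}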
+2 -2
@@ -935,7 +935,7 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
const bytes = ((elem_bits * vector_type.len) + 7) / 8;
return .fromByteUnits(std.math.ceilPowerOfTwoAssert(u32, bytes));
},
.stage2_c => return Type.fromInterned(vector_type.child).abiAlignment(zcu),
.stage2_c, .stage2_wasm => return Type.fromInterned(vector_type.child).abiAlignment(zcu),
.stage2_x86_64 => {
if (vector_type.child == .bool_type) {
if (vector_type.len > 256 and target.cpu.has(.x86, .avx512f)) return .@"64";
@@ -1084,7 +1084,7 @@ pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
const elem_ty: Type = .fromInterned(vec.child);
const bytes = switch (zcu.comp.getZigBackend()) {
else => std.math.divCeil(u64, vec.len * elem_ty.bitSize(zcu), 8) catch unreachable,
.stage2_c => vec.len * elem_ty.abiSize(zcu),
.stage2_c, .stage2_wasm => vec.len * elem_ty.abiSize(zcu),
.stage2_x86_64 => switch (elem_ty.toIntern()) {
.bool_type => std.math.divCeil(u64, vec.len, 8) catch unreachable,
else => vec.len * elem_ty.abiSize(zcu),
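For reference, the two layout rules this hunk switches between, written out as plain functions (illustrative sketch; the function names are made up):

const std = @import("std");

// default backends: vectors are bit-packed, rounded up to whole bytes
fn bitpackedVecBytes(len: u64, elem_bits: u64) u64 {
    return std.math.divCeil(u64, len * elem_bits, 8) catch unreachable;
}

// stage2_c and (now) stage2_wasm: one ABI-sized element after another
fn elementwiseVecBytes(len: u64, elem_abi_size: u64) u64 {
    return len * elem_abi_size;
}

test "vector size formulas" {
    // @Vector(4, bool): 1 byte bit-packed vs. 4 bytes element-wise
    try std.testing.expectEqual(@as(u64, 1), bitpackedVecBytes(4, 1));
    try std.testing.expectEqual(@as(u64, 4), elementwiseVecBytes(4, 1));
}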
+5 -1
@@ -484,7 +484,11 @@ pub fn generateSymbol(
},
.vector_type => |vector_type| {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
if (vector_type.child == .bool_type) {
const vector_bool_bitpacked = switch (zcu.comp.getZigBackend()) {
.stage2_wasm => false,
else => true,
};
if (vector_type.child == .bool_type and vector_bool_bitpacked) {
const bytes = try w.writableSlice(abi_size);
@memset(bytes, 0xaa);
var index: usize = 0;
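The 0xaa memset plus per-bit stores above implement the bit-packed bool-vector constant that every backend except stage2_wasm now uses. Roughly the following, assuming LSB-first bit order (hypothetical helper, not from this commit):

const std = @import("std");

fn packBools(comptime n: usize, vals: [n]bool) u8 {
    var byte: u8 = 0xaa; // fill pattern for undefined padding bits, as in the hunk above
    for (vals, 0..) |v, i| {
        const bit: u8 = @as(u8, 1) << @intCast(i);
        if (v) byte |= bit else byte &= ~bit;
    }
    return byte;
}

test packBools {
    try std.testing.expectEqual(@as(u8, 0b10101101), packBools(4, .{ true, false, true, true }));
}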
+375 -268
@@ -43,6 +43,83 @@ pub fn legalizeFeatures(_: *const std.Target) *const Air.Legalize.Features {
.expand_packed_store,
.expand_packed_struct_field_val,
.expand_packed_aggregate_init,
.scalarize_add,
.scalarize_add_optimized,
.scalarize_add_wrap,
.scalarize_add_sat,
.scalarize_sub,
.scalarize_sub_optimized,
.scalarize_sub_wrap,
.scalarize_sub_sat,
.scalarize_mul,
.scalarize_mul_optimized,
.scalarize_mul_wrap,
.scalarize_mul_sat,
.scalarize_div_float,
.scalarize_div_float_optimized,
.scalarize_div_trunc,
.scalarize_div_trunc_optimized,
.scalarize_div_floor,
.scalarize_div_floor_optimized,
.scalarize_div_exact,
.scalarize_div_exact_optimized,
.scalarize_rem,
.scalarize_rem_optimized,
.scalarize_mod,
.scalarize_mod_optimized,
.scalarize_max,
.scalarize_min,
.scalarize_add_with_overflow,
.scalarize_sub_with_overflow,
.scalarize_mul_with_overflow,
.scalarize_shl_with_overflow,
.scalarize_bit_and,
.scalarize_bit_or,
.scalarize_shr,
.scalarize_shr_exact,
.scalarize_shl,
.scalarize_shl_exact,
.scalarize_shl_sat,
.scalarize_xor,
.scalarize_not,
.scalarize_bitcast,
.scalarize_clz,
.scalarize_ctz,
.scalarize_popcount,
.scalarize_byte_swap,
.scalarize_bit_reverse,
.scalarize_sqrt,
.scalarize_sin,
.scalarize_cos,
.scalarize_tan,
.scalarize_exp,
.scalarize_exp2,
.scalarize_log,
.scalarize_log2,
.scalarize_log10,
.scalarize_abs,
.scalarize_floor,
.scalarize_ceil,
.scalarize_round,
.scalarize_trunc_float,
.scalarize_neg,
.scalarize_neg_optimized,
.scalarize_cmp_vector,
.scalarize_cmp_vector_optimized,
.scalarize_fptrunc,
.scalarize_fpext,
.scalarize_intcast,
.scalarize_trunc,
.scalarize_int_from_float,
.scalarize_int_from_float_optimized,
.scalarize_float_from_int,
.scalarize_reduce,
.scalarize_reduce_optimized,
.scalarize_shuffle_one,
.scalarize_shuffle_two,
.scalarize_select,
.scalarize_mul_add,
});
}
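Enabling these features makes Legalize rewrite every listed vector instruction into per-element scalar operations, so the wasm backend only ever sees scalar ops plus the two new legalize_vec_* instructions. The observable semantics stay element-wise; a sketch of what the scalarized lowering must preserve:

const std = @import("std");

test "vector ops are element-wise" {
    const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u8) = .{ 10, 20, 30, 40 };
    const sum: [4]u8 = a + b;
    var expected: [4]u8 = undefined;
    // what the scalarized lowering computes, one lane at a time
    for (&expected, 0..) |*e, i| e.* = a[i] + b[i];
    try std.testing.expectEqual(expected, sum);
}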
@@ -217,8 +294,7 @@ const WValue = union(enum) {
try gen.addLocal(.local_set, new_local.local.value);
return new_local;
},
.local, .stack_offset => return value,
else => unreachable,
else => return value,
}
}
@@ -765,7 +841,8 @@ fn generateInner(cg: *CodeGen, any_returns: bool) InnerError!Mir {
// In case we have a return value, but the last instruction is a noreturn (such as a while loop)
// we emit an unreachable instruction to tell the stack validator that part will never be reached.
if (any_returns and cg.air.instructions.len > 0) {
const inst: Air.Inst.Index = @enumFromInt(cg.air.instructions.len - 1);
const main_body = cg.air.getMainBody();
const inst: Air.Inst.Index = main_body[main_body.len - 1];
const last_inst_ty = cg.typeOfIndex(inst);
if (!last_inst_ty.hasRuntimeBits(zcu)) {
try cg.addTag(.@"unreachable");
@@ -1032,10 +1109,6 @@ fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue {
try cg.initializeStack();
}
if (!pointee_ty.hasRuntimeBits(zcu)) {
return cg.allocStack(Type.usize); // create a value containing just the stack pointer.
}
const abi_alignment = ptr_ty.ptrAlignment(zcu);
const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
return cg.fail("Type {f} with ABI size of {d} exceeds stack frame size", .{
@@ -1050,157 +1123,67 @@ fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue {
return .{ .stack_offset = .{ .value = offset, .references = 1 } };
}
/// Performs a copy of bytes for a given type. Copying all bytes
/// from rhs to lhs.
fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
    const len_known_neq_0 = switch (len) {
        .imm32 => |val| if (val != 0) true else return,
        .imm64 => |val| if (val != 0) true else return,
        else => false,
    };
    // When bulk_memory is enabled, we lower it to wasm's memcpy instruction.
    // If not, we lower it ourselves manually
    if (cg.target.cpu.has(.wasm, .bulk_memory)) {
        const len0_ok = cg.target.cpu.has(.wasm, .nontrapping_bulk_memory_len0);
        const emit_check = !(len0_ok or len_known_neq_0);
        if (emit_check) {
            try cg.startBlock(.block, .empty);
            // Even if `len` is zero, the spec requires an implementation to trap if `src + len` or
            // `dst + len` are out of memory bounds. This can easily happen in Zig in a case such
            // as:
            //
            // const dst: [*]u8 = undefined;
            // const src: [*]u8 = undefined;
            // var len: usize = runtime_zero();
            // @memcpy(dst[0..len], src[0..len]);
            //
            // So explicitly avoid using `memory.copy` in the `len == 0` case. Lovely design.
            try cg.emitWValue(len);
            try cg.addTag(.i32_eqz);
            try cg.addLabel(.br_if, 0);
        }
        try cg.lowerToStack(dst);
        try cg.lowerToStack(src);
        try cg.emitWValue(len);
        try cg.addExtended(.memory_copy);
        if (emit_check) {
            try cg.endBlock();
        }
        return;
    }
    // when the length is comptime-known, rather than a runtime value, we can optimize the generated code by having
    // the loop during codegen, rather than inserting a runtime loop into the binary.
    switch (len) {
        .imm32, .imm64 => blk: {
            const length = switch (len) {
                .imm32 => |val| val,
                .imm64 => |val| val,
                else => unreachable,
            };
            // if the size (length) is more than 32 bytes, we use a runtime loop instead to prevent
            // binary size bloat.
            if (length > 32) break :blk;
            var offset: u32 = 0;
            const lhs_base = dst.offset();
            const rhs_base = src.offset();
            while (offset < length) : (offset += 1) {
                // get dst's address to store the result
                try cg.emitWValue(dst);
                // load byte from src's address
                try cg.emitWValue(src);
                switch (cg.ptr_size) {
                    .wasm32 => {
                        try cg.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
                        try cg.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
                    },
                    .wasm64 => {
                        try cg.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
                        try cg.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
                    },
                }
            }
            return;
        },
        else => {},
    }
    // allocate a local for the offset, and set it to 0.
    // This to ensure that inside loops we correctly re-set the counter.
    var offset = try cg.allocLocal(Type.usize); // local for counter
    defer offset.free(cg);
    switch (cg.ptr_size) {
        .wasm32 => try cg.addImm32(0),
        .wasm64 => try cg.addImm64(0),
    }
    try cg.addLocal(.local_set, offset.local.value);
    // outer block to jump to when loop is done
    try cg.startBlock(.block, .empty);
    try cg.startBlock(.loop, .empty);
    // loop condition (offset == length -> break)
    {
        try cg.emitWValue(offset);
        try cg.emitWValue(len);
        switch (cg.ptr_size) {
            .wasm32 => try cg.addTag(.i32_eq),
            .wasm64 => try cg.addTag(.i64_eq),
        }
        try cg.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
    }
    // get dst ptr
    {
        try cg.emitWValue(dst);
        try cg.emitWValue(offset);
        switch (cg.ptr_size) {
            .wasm32 => try cg.addTag(.i32_add),
            .wasm64 => try cg.addTag(.i64_add),
        }
    }
    // get src value and also store in dst
    {
        try cg.emitWValue(src);
        try cg.emitWValue(offset);
        switch (cg.ptr_size) {
            .wasm32 => {
                try cg.addTag(.i32_add);
                try cg.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 });
                try cg.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 });
            },
            .wasm64 => {
                try cg.addTag(.i64_add);
                try cg.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 });
                try cg.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 });
            },
        }
    }
    // increment loop counter
    {
        try cg.emitWValue(offset);
        switch (cg.ptr_size) {
            .wasm32 => {
                try cg.addImm32(1);
                try cg.addTag(.i32_add);
            },
            .wasm64 => {
                try cg.addImm64(1);
                try cg.addTag(.i64_add);
            },
        }
        try cg.addLocal(.local_set, offset.local.value);
        try cg.addLabel(.br, 0); // jump to start of loop
    }
    try cg.endBlock(); // close off loop block
    try cg.endBlock(); // close off outer block
}

fn emitMemoryCopy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
    const len_known_neq_0 = switch (len) {
        .imm32 => |val| if (val != 0) true else return,
        .imm64 => |val| if (val != 0) true else return,
        else => false,
    };
    const len0_ok = cg.target.cpu.has(.wasm, .nontrapping_bulk_memory_len0);
    const emit_check = !(len0_ok or len_known_neq_0);
    if (emit_check) {
        try cg.startBlock(.block, .empty);
        // Even if `len` is zero, the spec requires an implementation to trap if `src + len` or
        // `dst + len` are out of memory bounds. This can easily happen in Zig in a case such
        // as:
        //
        // const dst: [*]u8 = undefined;
        // const src: [*]u8 = undefined;
        // var len: usize = runtime_zero();
        // @memcpy(dst[0..len], src[0..len]);
        //
        // So explicitly avoid using `memory.copy` in the `len == 0` case. Lovely design.
        try cg.emitWValue(len);
        try cg.addTag(.i32_eqz);
        try cg.addLabel(.br_if, 0);
    }
    try cg.lowerToStack(dst);
    try cg.lowerToStack(src);
    try cg.emitWValue(len);
    try cg.addExtended(.memory_copy);
    if (emit_check) {
        try cg.endBlock();
    }
}

fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
    if (cg.target.cpu.has(.wasm, .bulk_memory)) {
        try cg.emitMemoryCopy(dst, src, len);
        return;
    }
    try cg.lowerToStack(dst);
    try cg.lowerToStack(src);
    try cg.emitWValue(len);
    try cg.addCallIntrinsic(.memcpy);
    try cg.addTag(.drop);
}

fn memmove(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
    if (cg.target.cpu.has(.wasm, .bulk_memory)) {
        try cg.emitMemoryCopy(dst, src, len);
        return;
    }
    try cg.lowerToStack(dst);
    try cg.lowerToStack(src);
    try cg.emitWValue(len);
    try cg.addCallIntrinsic(.memmove);
    try cg.addTag(.drop);
}
fn ptrSize(cg: *const CodeGen) u16 {
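Without bulk_memory, @memcpy and @memmove now lower to the compiler-rt style intrinsics added in this commit instead of inline byte loops. Either way, memmove must stay correct for overlapping regions (which wasm's memory.copy also guarantees); a behavior check:

const std = @import("std");

test "@memmove tolerates overlap" {
    var buf = [_]u8{ 1, 2, 3, 4, 5 };
    @memmove(buf[1..4], buf[0..3]);
    try std.testing.expectEqualSlices(u8, &.{ 1, 1, 2, 3, 5 }, &buf);
}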
@@ -1310,14 +1293,34 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const zcu = cg.pt.zcu;
const air_tags = cg.air.instructions.items(.tag);
return switch (air_tags[@intFromEnum(inst)]) {
// No "scalarize" legalizations are enabled, so these instructions never appear.
.legalize_vec_elem_val => unreachable,
.legalize_vec_store_elem => unreachable,
// No soft float legalizations are enabled.
.legalize_compiler_rt_call => unreachable,
.inferred_alloc, .inferred_alloc_comptime => unreachable,
.legalize_vec_elem_val => cg.airArrayElemVal(inst),
.legalize_vec_store_elem => {
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const bin_op = cg.air.extraData(Air.Bin, pl_op.payload).data;
const vec_ptr = try cg.resolveInst(pl_op.operand);
const elem_idx = try cg.resolveInst(bin_op.lhs);
const elem_val = try cg.resolveInst(bin_op.rhs);
const elem_ty = cg.typeOf(bin_op.rhs);
const elem_size = elem_ty.abiSize(zcu);
try cg.lowerToStack(vec_ptr);
try cg.emitWValue(elem_idx);
try cg.addImm32(@intCast(elem_size));
try cg.addTag(.i32_mul);
try cg.addTag(.i32_add);
const ptr = try WValue.toLocal(.stack, cg, Type.usize);
try cg.store(ptr, elem_val, elem_ty, 0);
return cg.finishAir(inst, .none, &.{ pl_op.operand, bin_op.lhs, bin_op.rhs });
},
.add,
.sub,
.mul,
@@ -1818,7 +1821,8 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.wasm_memory_size => cg.airWasmMemorySize(inst),
.wasm_memory_grow => cg.airWasmMemoryGrow(inst),
.memcpy, .memmove => cg.airMemcpy(inst),
.memcpy => cg.airMemcpy(inst),
.memmove => cg.airMemmove(inst),
.ret_addr => cg.airRetAddr(inst),
.tag_name => cg.airTagName(inst),
@@ -2105,7 +2109,7 @@ fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
assert(ptr_info.packed_offset.host_size == 0); // legalize .expand_packed_store
assert(!(ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none)); // legalize .expand_packed_store
try cg.store(lhs, rhs, ty, 0);
@@ -2180,35 +2184,22 @@ fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (!ty.hasRuntimeBits(zcu)) return cg.finishAir(inst, .none, &.{ty_op.operand});
assert(ptr_info.packed_offset.host_size == 0); // legalize .expand_packed_load
assert(!(ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none)); // legalize .expand_packed_load
const result = result: {
if (isByRef(ty, zcu, cg.target)) {
const new_local = try cg.allocStack(ty);
try cg.store(new_local, operand, ty, 0);
break :result new_local;
}
const loaded = try cg.load(operand, ty, 0);
const ty_size = ty.abiSize(zcu);
if (ty.isAbiInt(zcu) and ty_size * 8 > ty.bitSize(zcu)) {
const int_info = ty.intInfo(zcu);
const loaded_int_ty: IntType = .{
.is_signed = int_info.signedness == .signed,
.bits = @intCast(ty_size * 8),
};
break :result try cg.intTrunc(.fromType(cg, ty), loaded_int_ty, loaded);
} else {
break :result loaded;
}
};
const result = try cg.load(operand, ty, 0);
return cg.finishAir(inst, result, &.{ty_op.operand});
}
/// Loads an operand from the linear memory section.
/// NOTE: Leaves the value on the stack.
/// NOTE: Leaves the value on the stack, if isByRef == false.
fn load(cg: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
const zcu = cg.pt.zcu;
if (isByRef(ty, zcu, cg.target)) {
const val = try cg.allocStack(ty);
try cg.store(val, try operand.toLocal(cg, .usize), ty, 0);
return val;
}
// load local's value from memory by its stack position
try cg.emitWValue(operand);
@@ -2254,6 +2245,14 @@ fn load(cg: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue
},
);
if (ty.isAbiInt(zcu)) {
const int_info: IntType = .fromType(cg, ty);
switch (int_info.bits) {
8, 16, 32, 64 => {},
else => _ = try cg.intWrap(int_info, .stack),
}
}
return .stack;
}
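The new wrap step matters for non-power-of-two integers: they are loaded with the next power-of-two load width, so the high bits hold garbage until masked back to the declared width. In user-level terms (a sketch of the invariant, not code from this commit):

const std = @import("std");

test "narrowing a loaded byte to an odd bit width" {
    const raw: u8 = 0xff; // the byte as it sits in memory; the top bit is padding for a u7
    const wrapped: u7 = @truncate(raw); // the canonical value intWrap restores
    try std.testing.expectEqual(@as(u7, 0x7f), wrapped);
}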
@@ -2321,7 +2320,7 @@ const IntType = struct {
.hasRuntimeBits(zcu)) .{ .is_signed = false, .bits = zcu.errorSetBits() } else unreachable,
.simple_type => |simple_type| return switch (simple_type) {
.bool => .{ .is_signed = false, .bits = 1 },
.anyerror => .{ .is_signed = false, .bits = zcu.errorSetBits() },
.anyerror, .adhoc_inferred_error_set => .{ .is_signed = false, .bits = zcu.errorSetBits() },
.isize => .{ .is_signed = true, .bits = cg.target.ptrBitWidth() },
.usize => .{ .is_signed = false, .bits = cg.target.ptrBitWidth() },
.c_char => .{ .is_signed = cg.target.cCharSignedness() == .signed, .bits = cg.target.cTypeBitSize(.char) },
@@ -2334,7 +2333,7 @@ const IntType = struct {
.c_longlong => .{ .is_signed = true, .bits = cg.target.cTypeBitSize(.longlong) },
.c_ulonglong => .{ .is_signed = false, .bits = cg.target.cTypeBitSize(.longlong) },
.f16, .f32, .f64, .f80, .f128, .c_longdouble => unreachable,
.anyopaque, .void, .type, .comptime_int, .comptime_float, .noreturn, .null, .undefined, .enum_literal, .adhoc_inferred_error_set, .generic_poison => unreachable,
.anyopaque, .void, .type, .comptime_int, .comptime_float, .noreturn, .null, .undefined, .enum_literal, .generic_poison => unreachable,
},
.struct_type => {
const loaded_struct = ip.loadStructType(ty_index);
@@ -3069,7 +3068,13 @@ fn intClz(cg: *CodeGen, ty: IntType, operand: WValue) InnerError!WValue {
var msb = try (try cg.load(operand, Type.u64, 8)).toLocal(cg, Type.u64);
defer msb.free(cg);
try cg.emitWValue(msb);
if (ty.is_signed and ty.bits < 128) {
const mask: u64 = ~@as(u64, 0) >> @intCast(128 - ty.bits);
_ = try cg.intAnd(.u64, msb, .{ .imm64 = mask });
} else {
try cg.emitWValue(msb);
}
try cg.addTag(.i64_clz);
_ = try cg.load(operand, Type.u64, 0);
try cg.addTag(.i64_clz);
@@ -3078,6 +3083,12 @@ fn intClz(cg: *CodeGen, ty: IntType, operand: WValue) InnerError!WValue {
_ = try cg.intCmp(.u64, .neq, msb, .{ .imm64 = 0 });
try cg.addTag(.select);
try cg.addTag(.i32_wrap_i64);
if (ty.bits < 128) {
try cg.addImm32(128 - ty.bits);
try cg.addTag(.i32_sub);
}
return .stack;
},
else => {
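The masking of the most significant limb and the final `128 - bits` correction make @clz count from the type's own top bit rather than bit 127 of the 128-bit representation; for example:

const std = @import("std");

test "@clz on a wide non-power-of-two integer" {
    const x: u100 = 1;
    try std.testing.expectEqual(@as(u7, 99), @clz(x));
}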
@@ -4622,11 +4633,18 @@ fn floatFromInt(cg: *CodeGen, dest_ty: FloatType, src_ty: IntType, operand: WVal
fn lowerPtr(cg: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
const pt = cg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
const offset: u64 = prev_offset + ptr.byte_offset;
return switch (ptr.base_addr) {
.nav => |nav| return .{ .nav_ref = .{ .nav_index = nav, .offset = @intCast(offset) } },
.uav => |uav| return .{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset), .orig_ptr_ty = uav.orig_ty } },
.nav => |nav| return if (Type.fromInterned(ip.getNav(nav).resolved.?.type).isRuntimeFnOrHasRuntimeBits(zcu))
.{ .nav_ref = .{ .nav_index = nav, .offset = @intCast(offset) } }
else
.{ .imm32 = @intCast(zcu.navAlignment(nav).forward(@as(u32, 0xaaaaaaaa))) },
.uav => |uav| return if (Type.fromInterned(ip.typeOf(uav.val)).isRuntimeFnOrHasRuntimeBits(zcu))
.{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset), .orig_ptr_ty = uav.orig_ty } }
else
.{ .imm32 = @intCast(Type.fromInterned(uav.orig_ty).ptrAlignment(zcu).forward(@as(u32, 0xaaaaaaaa))) },
.int => return cg.lowerConstant(try pt.intValue(.usize, offset)),
.eu_payload => |eu_ptr| try cg.lowerPtr(
eu_ptr,
@@ -4960,9 +4978,9 @@ fn airCmp(cg: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) Inne
try cg.addImm32(if (op == .eq) 0 else 1);
try cg.addLocal(.local_set, result.local.value);
_ = try cg.isNull(lhs, operand_ty, .i32_eq);
_ = try cg.isNull(lhs, operand_ty, .i32_eq, .value);
try cg.addLocal(.local_tee, lhs_null.local.value);
_ = try cg.isNull(rhs, operand_ty, .i32_eq);
_ = try cg.isNull(rhs, operand_ty, .i32_eq, .value);
try cg.addTag(.i32_ne);
try cg.addLabel(.br_if, 0);
@@ -5234,26 +5252,56 @@ fn airBitcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn bitcast(cg: *CodeGen, dest_ty: Type, src_ty: Type, operand: WValue) InnerError!?WValue {
const zcu = cg.pt.zcu;
const bit_size = src_ty.bitSize(zcu);
const needs_wrapping = (src_ty.isSignedInt(zcu) != dest_ty.isSignedInt(zcu)) and
    bit_size != 32 and bit_size != 64 and bit_size != 128;
if (src_ty.isAnyFloat() or dest_ty.isAnyFloat()) {
    if (dest_ty.ip_index == .f16_type or src_ty.ip_index == .f16_type) return null;
    if (dest_ty.bitSize(zcu) > 64) return null;
    const dest_valtype = typeToValtype(dest_ty, zcu, cg.target);
    const opcode: Mir.Inst.Tag = switch (dest_valtype) {
        .i32 => .i32_reinterpret_f32,
        .i64 => .i64_reinterpret_f64,
        .f32 => .f32_reinterpret_i32,
        .f64 => .f64_reinterpret_i64,
        else => unreachable,
    };
    try cg.emitWValue(operand);
    try cg.addTag(opcode);
    return .stack;
}

const dest_signed = if (dest_ty.isAbiInt(zcu)) IntType.fromType(cg, dest_ty).is_signed else false;
const src_signed = if (src_ty.isAbiInt(zcu)) IntType.fromType(cg, src_ty).is_signed else false;
const needs_wrapping = (src_signed != dest_signed) and
    bit_size != 32 and bit_size != 64 and bit_size != 128;
if (src_ty.isAnyFloat() or dest_ty.isAnyFloat()) {
    assert((dest_ty.isInt(zcu) and src_ty.isAnyFloat()) or (dest_ty.isAnyFloat() and src_ty.isInt(zcu)));
    if (src_ty.isAnyFloat()) {
        const float_ty: FloatType = .fromType(cg, src_ty);
        switch (float_ty) {
            .f16, .f80, .f128 => {
                if (dest_signed) {
                    const int_ty: IntType = .fromType(cg, dest_ty);
                    return try cg.intWrap(int_ty, operand);
                } else {
                    return null;
                }
            },
            .f32 => {
                try cg.emitWValue(operand);
                try cg.addTag(.i32_reinterpret_f32);
                return .stack;
            },
            .f64 => {
                try cg.emitWValue(operand);
                try cg.addTag(.i64_reinterpret_f64);
                return .stack;
            },
        }
    }
    if (dest_ty.isAnyFloat()) {
        const float_ty: FloatType = .fromType(cg, dest_ty);
        switch (float_ty) {
            .f16, .f80, .f128 => {
                if (src_signed) {
                    return try cg.intWrap(.{ .bits = @intCast(bit_size), .is_signed = false }, operand);
                } else {
                    return null;
                }
            },
            .f32 => {
                try cg.emitWValue(operand);
                try cg.addTag(.f32_reinterpret_i32);
                return .stack;
            },
            .f64 => {
                try cg.emitWValue(operand);
                try cg.addTag(.f64_reinterpret_i64);
                return .stack;
            },
        }
    }
}
if (isByRef(src_ty, zcu, cg.target) and !isByRef(dest_ty, zcu, cg.target)) {
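The f32/f64 cases are plain reinterprets of the bit pattern; the user-visible semantics the lowering must preserve:

const std = @import("std");

test "bitcast between float and int reinterprets bits" {
    const bits: u32 = @bitCast(@as(f32, 1.0));
    try std.testing.expectEqual(@as(u32, 0x3f800000), bits);
    try std.testing.expectEqual(@as(f32, 1.0), @as(f32, @bitCast(bits)));
}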
@@ -5751,23 +5799,27 @@ fn airWrapErrUnionErr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return cg.finishAir(inst, result, &.{ty_op.operand});
}
fn airIsNull(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
const zcu = cg.pt.zcu;
const OpKind = enum { value, ptr };
fn airIsNull(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: OpKind) InnerError!void {
const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try cg.resolveInst(un_op);
const op_ty = cg.typeOf(un_op);
const optional_ty = if (op_kind == .ptr) op_ty.childType(zcu) else op_ty;
const result = try cg.isNull(operand, optional_ty, opcode);
const result = try cg.isNull(operand, op_ty, opcode, op_kind);
return cg.finishAir(inst, result, &.{un_op});
}
/// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack
fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opcode) InnerError!WValue {
fn isNull(cg: *CodeGen, operand: WValue, op_ty: Type, opcode: std.wasm.Opcode, op_kind: OpKind) InnerError!WValue {
const pt = cg.pt;
const zcu = pt.zcu;
try cg.emitWValue(operand);
const optional_ty = switch (op_kind) {
.value => op_ty,
.ptr => op_ty.childType(zcu),
};
const payload_ty = optional_ty.optionalChild(zcu);
if (!optional_ty.optionalReprIsPayload(zcu)) {
// When payload is zero-bits, we can treat operand as a value, rather than
@@ -5783,6 +5835,13 @@ fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opc
.wasm32 => try cg.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
.wasm64 => try cg.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
}
} else {
if (op_kind == .ptr) {
try cg.addMemArg(.i32_load, .{
.offset = operand.offset(),
.alignment = 4,
});
}
}
// Compare the null value with '0'
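The new .ptr path dereferences first, then inspects whichever representation the optional uses. Which representation that is depends on the payload type; a sketch of the two layouts isNull distinguishes:

const std = @import("std");

test "optional representations" {
    // pointers use the null address itself as the flag (optionalReprIsPayload)
    try std.testing.expectEqual(@sizeOf(*u8), @sizeOf(?*u8));
    // other payloads carry a separate bool tag, which is what isNull loads above
    try std.testing.expect(@sizeOf(?u8) > @sizeOf(u8));
}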
@@ -5934,10 +5993,7 @@ fn airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try cg.addTag(.i32_mul);
try cg.addTag(.i32_add);
const elem_result = if (isByRef(elem_ty, zcu, cg.target))
.stack
else
try cg.load(.stack, elem_ty, 0);
const elem_result = try cg.load(.stack, elem_ty, 0);
return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
}
@@ -5991,10 +6047,7 @@ fn airArrayToSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// create a slice on the stack
const slice_local = try cg.allocStack(slice_ty);
// store the array ptr in the slice
if (array_ty.hasRuntimeBits(zcu)) {
try cg.store(slice_local, operand, Type.usize, 0);
}
try cg.store(slice_local, operand, Type.usize, 0);
// store the length of the array in the slice
const array_len: u32 = @intCast(array_ty.arrayLen(zcu));
@@ -6026,10 +6079,7 @@ fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try cg.addTag(.i32_mul);
try cg.addTag(.i32_add);
const elem_result = if (isByRef(elem_ty, zcu, cg.target))
.stack
else
try cg.load(.stack, elem_ty, 0);
const elem_result = try cg.load(.stack, elem_ty, 0);
return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
}
@@ -6349,12 +6399,8 @@ fn airSplat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => unreachable,
}
}
const elem_size = elem_ty.bitSize(zcu);
const vector_len = @as(usize, @intCast(ty.vectorLen(zcu)));
if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
return cg.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
}
const vector_len = @as(usize, @intCast(ty.vectorLen(zcu)));
const result = try cg.allocStack(ty);
const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
var index: usize = 0;
@@ -6494,7 +6540,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result: WValue = result_value: {
switch (result_ty.zigTypeTag(zcu)) {
.array => {
.array, .vector => {
const result = try cg.allocStack(result_ty);
const elem_ty = result_ty.childType(zcu);
const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
@@ -6554,7 +6600,6 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
break :result_value result;
},
},
.vector => return cg.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
else => unreachable,
}
};
@@ -6792,6 +6837,37 @@ fn airMemcpy(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
fn airMemmove(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const zcu = cg.pt.zcu;
const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const dst = try cg.resolveInst(bin_op.lhs);
const dst_ty = cg.typeOf(bin_op.lhs);
const ptr_elem_ty = dst_ty.childType(zcu);
const src = try cg.resolveInst(bin_op.rhs);
const src_ty = cg.typeOf(bin_op.rhs);
const len = switch (dst_ty.ptrSize(zcu)) {
.slice => blk: {
const slice_len = try cg.sliceLen(dst);
if (ptr_elem_ty.abiSize(zcu) != 1) {
try cg.emitWValue(slice_len);
try cg.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) });
try cg.addTag(.i32_mul);
try cg.addLocal(.local_set, slice_len.local.value);
}
break :blk slice_len;
},
.one => @as(WValue, .{
.imm32 = @as(u32, @intCast(ptr_elem_ty.arrayLen(zcu) * ptr_elem_ty.childType(zcu).abiSize(zcu))),
}),
.c, .many => unreachable,
};
const dst_ptr = try cg.sliceOrArrayPtr(dst, dst_ty);
const src_ptr = try cg.sliceOrArrayPtr(src, src_ty);
try cg.memmove(dst_ptr, src_ptr, len);
return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
}
fn airRetAddr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// TODO: Implement this properly once stack serialization is solved
return cg.finishAir(inst, switch (cg.ptr_size) {
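airMemmove scales a slice operand's element count to a byte count before calling memmove, mirroring the i32_mul above; the computation it performs, written out plainly:

const std = @import("std");

test "byte length fed to the memmove lowering" {
    const elems: [4]u32 = .{ 1, 2, 3, 4 };
    const slice: []const u32 = &elems;
    // element count times the element's ABI size
    const byte_len = slice.len * @sizeOf(u32);
    try std.testing.expectEqual(@as(usize, 16), byte_len);
}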
@@ -6992,7 +7068,7 @@ fn airTagName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = try cg.allocStack(cg.typeOfIndex(inst));
try cg.lowerToStack(result_ptr);
try cg.emitWValue(operand);
try cg.lowerToStack(operand);
try cg.addInst(.{ .tag = .call_tag_name, .data = .{ .ip_index = enum_ty.toIntern() } });
return cg.finishAir(inst, result_ptr, &.{un_op});
@@ -7195,9 +7271,8 @@ fn airAtomicRmw(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty = cg.typeOfIndex(inst);
const op: std.builtin.AtomicRmwOp = extra.op();
const int_ty: IntType = .fromType(cg, ty);
if (cg.useAtomicFeature()) {
const int_ty: IntType = .fromType(cg, ty);
switch (op) {
.Max,
.Min,
@@ -7312,35 +7387,65 @@ fn airAtomicRmw(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.Add,
.Sub,
=> {
if (ty.isAnyFloat()) {
const float_ty: FloatType = .fromType(cg, ty);
try cg.emitWValue(ptr);
_ = switch (op) {
.Add => try cg.floatAdd(float_ty, result, operand),
.Sub => try cg.floatSub(float_ty, result, operand),
else => unreachable,
};
try cg.store(.stack, .stack, ty, ptr.offset());
} else {
const int_ty: IntType = .fromType(cg, ty);
try cg.emitWValue(ptr);
_ = switch (op) {
.Add => try cg.intAdd(int_ty, result, operand),
.Sub => try cg.intSub(int_ty, result, operand),
else => unreachable,
};
_ = try cg.intWrap(int_ty, .stack);
try cg.store(.stack, .stack, ty, ptr.offset());
}
},
.And,
.Or,
.Xor,
=> {
const int_ty: IntType = .fromType(cg, ty);
try cg.emitWValue(ptr);
_ = switch (op) {
.Add => try cg.intAdd(int_ty, result, operand),
.Sub => try cg.intSub(int_ty, result, operand),
.And => try cg.intAnd(int_ty, result, operand),
.Or => try cg.intOr(int_ty, result, operand),
.Xor => try cg.intXor(int_ty, result, operand),
else => unreachable,
};
if (ty.isInt(zcu) and (op == .Add or op == .Sub)) {
_ = try cg.intWrap(int_ty, .stack);
}
try cg.store(.stack, .stack, ty, ptr.offset());
},
.Max,
.Min,
=> {
try cg.emitWValue(ptr);
try cg.emitWValue(result);
try cg.emitWValue(operand);
_ = try cg.intCmp(int_ty, if (op == .Max) .gt else .lt, result, operand);
try cg.addTag(.select);
try cg.store(.stack, .stack, ty, ptr.offset());
if (ty.isAnyFloat()) {
const float_ty: FloatType = .fromType(cg, ty);
try cg.emitWValue(ptr);
try cg.emitWValue(result);
try cg.emitWValue(operand);
_ = try cg.floatCmp(float_ty, if (op == .Max) .gt else .lt, result, operand);
try cg.addTag(.select);
try cg.store(.stack, .stack, ty, ptr.offset());
} else {
const int_ty: IntType = .fromType(cg, ty);
try cg.emitWValue(ptr);
try cg.emitWValue(result);
try cg.emitWValue(operand);
_ = try cg.intCmp(int_ty, if (op == .Max) .gt else .lt, result, operand);
try cg.addTag(.select);
try cg.store(.stack, .stack, ty, ptr.offset());
}
},
.Nand => {
const int_ty: IntType = .fromType(cg, ty);
try cg.emitWValue(ptr);
const and_res = try cg.intAnd(int_ty, result, operand);
if (int_ty.bits <= 32) {
@@ -7423,49 +7528,51 @@ fn airAsm(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else
.none;
var local_map: assembly.LocalMap = .empty;
defer local_map.deinit(cg.gpa);
if (unwrapped_asm.source.len != 0) {
    var local_map: assembly.LocalMap = .empty;
    defer local_map.deinit(cg.gpa);
    {
        var it = unwrapped_asm.iterateOutputs();
        if (it.next()) |output| {
            const constraint = output.constraint;
            assert(output.operand == .none);
            const name = output.name;
            if (!mem.eql(u8, constraint, "=r")) {
                return cg.fail("Self-hosted wasm backend requires output constraint to be equal \"=r\"", .{});
            }
            const gop = try local_map.getOrPutValue(cg.gpa, name, result.local.value);
            assert(!gop.found_existing); // first value
            assert(it.next() == null);
        }
    }
    {
        var it = unwrapped_asm.iterateInputs();
        while (it.next()) |input| {
            const constraint = input.constraint;
            const operand = try cg.resolveInst(input.operand);
            const name = input.name;
            if (!mem.eql(u8, constraint, "r")) {
                return cg.fail("Self-hosted wasm backend requires input constraint to be equal \"r\"", .{});
            }
            try cg.lowerToStack(operand);
            const op_local = try WValue.toLocal(.stack, cg, cg.typeOf(input.operand));
            const gop = try local_map.getOrPutValue(cg.gpa, name, op_local.local.value);
            if (gop.found_existing) {
                return cg.fail("Duplicate asm variable name \"{s}\"", .{name});
            }
        }
    }
    try assembly.assemble(cg, unwrapped_asm.source, &local_map);
}
var bt = cg.liveness.iterateBigTomb(inst);
for (outputs) |output| if (output != .none) cg.feed(&bt, output);
+3
@@ -1028,6 +1028,9 @@ pub const Intrinsic = enum(u32) {
tanf,
tanq,
truncq,
memcpy,
memmove,
memset,
__addo_limb64,
__subo_limb64,
__cmp_limb64,
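The three new intrinsics follow the libc-style contract of returning dst, which is why the codegen above drops the call's result value. A rough model of that contract (memcpyLike is a hypothetical helper, not from this commit):

const std = @import("std");

fn memcpyLike(dst: []u8, src: []const u8) []u8 {
    @memcpy(dst, src);
    return dst; // the return value the wasm lowering discards with .drop
}

test memcpyLike {
    var buf: [3]u8 = undefined;
    try std.testing.expectEqualSlices(u8, "abc", memcpyLike(&buf, "abc"));
}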
+55 -35
@@ -1865,7 +1865,6 @@ fn emitTagNameFunction(
) !void {
const comp = wasm.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_type_ip);
@@ -1909,11 +1908,7 @@ fn emitTagNameFunction(
}
const int_info = Zcu.Type.intInfo(.fromInterned(enum_type.int_tag_type), zcu);
const outer_block_type: std.wasm.BlockType = switch (int_info.bits) {
0...32 => .i32,
33...64 => .i64,
else => return diags.fail("wasm linker does not yet implement @tagName for sparse enums with more than 64 bit integer tag types", .{}),
};
const is_big_int = int_info.bits > 64;
try code.ensureUnusedCapacity(
gpa,
@@ -1930,41 +1925,66 @@ fn emitTagNameFunction(
// Outer block that computes table offset.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
code.appendAssumeCapacity(@intFromEnum(outer_block_type));
code.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.i32));
for (tag_values, 0..) |tag_value, tag_index| {
// block for this if case
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.block));
code.appendAssumeCapacity(@intFromEnum(std.wasm.BlockType.empty));
// Tag value whose name should be returned.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
appendReservedUleb32(code, 1);
const val: Zcu.Value = .fromInterned(tag_value);
switch (outer_block_type) {
    .i32 => {
        const x: u32 = switch (int_info.signedness) {
            .signed => @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))),
            .unsigned => @intCast(val.toUnsignedInt(zcu)),
        };
        appendReservedI32Const(code, x);
        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_ne));
    },
    .i64 => {
        const x: u64 = switch (int_info.signedness) {
            .signed => @bitCast(val.toSignedInt(zcu)),
            .unsigned => val.toUnsignedInt(zcu),
        };
        appendReservedI64Const(code, x);
        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne));
    },
    else => unreachable,
}
// if they're not equal, break out of current branch
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if));
appendReservedUleb32(code, 0);

const val: Zcu.Value = .fromInterned(tag_value);
if (is_big_int) {
    var val_space: Zcu.Value.BigIntSpace = undefined;
    const val_bigint = val.toBigInt(&val_space, zcu);
    const num_limbs = (int_info.bits + 63) / 64;
    const limbs = try gpa.alloc(u64, num_limbs);
    defer gpa.free(limbs);
    val_bigint.writeTwosComplement(@ptrCast(limbs), .little);
    try code.ensureUnusedCapacity(gpa, 35 * num_limbs);
    for (0..num_limbs) |limb_index| {
        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
        appendReservedUleb32(code, 1);
        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_load));
        appendReservedUleb32(code, @ctz(@as(u32, 8)));
        appendReservedUleb32(code, @intCast(limb_index * 8));
        appendReservedI64Const(code, limbs[limb_index]);
        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne));
        code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if));
        appendReservedUleb32(code, 0);
    }
} else {
    code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_get));
    appendReservedUleb32(code, 1);
    switch (int_info.bits) {
        0...32 => {
            const x: u32 = switch (int_info.signedness) {
                .signed => @bitCast(@as(i32, @intCast(val.toSignedInt(zcu)))),
                .unsigned => @intCast(val.toUnsignedInt(zcu)),
            };
            appendReservedI32Const(code, x);
            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_ne));
        },
        33...64 => {
            const x: u64 = switch (int_info.signedness) {
                .signed => @bitCast(val.toSignedInt(zcu)),
                .unsigned => val.toUnsignedInt(zcu),
            };
            appendReservedI64Const(code, x);
            code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_ne));
        },
        else => unreachable,
    }
    code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.br_if));
    appendReservedUleb32(code, 0);
}
// Put the table offset of the result on the stack.
appendReservedI32Const(code, @intCast(tag_index * slice_abi_size));
@@ -1995,7 +2015,7 @@ fn appendReservedI32Const(bytes: *ArrayList(u8), val: u32) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i32_const));
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeSleb128(val) catch |err| switch (err) {
return w.writeSleb128(@as(i32, @bitCast(val))) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}
@@ -2005,7 +2025,7 @@ fn appendReservedI64Const(bytes: *ArrayList(u8), val: u64) void {
bytes.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.i64_const));
var w: std.Io.Writer = .fromArrayList(bytes);
defer bytes.* = w.toArrayList();
return w.writeSleb128(val) catch |err| switch (err) {
return w.writeSleb128(@as(i64, @bitCast(val))) catch |err| switch (err) {
error.WriteFailed => unreachable,
};
}
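The @bitCast fixes matter because i32.const and i64.const operands are encoded as signed LEB128: the encoder must be handed the signed interpretation of the raw bits, e.g.:

const std = @import("std");

test "wasm const operands take the signed view of the bits" {
    const raw: u32 = 0xffff_ffff;
    try std.testing.expectEqual(@as(i32, -1), @as(i32, @bitCast(raw)));
}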
+2 -5
@@ -214,9 +214,9 @@ test "@abs floats" {
try comptime testAbsFloats(f64);
try testAbsFloats(f64);
try comptime testAbsFloats(f80);
if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f80);
if (builtin.zig_backend != .stage2_spirv and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f80);
try comptime testAbsFloats(f128);
if (builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_spirv and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f128);
if (builtin.zig_backend != .stage2_spirv and builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f128);
}
fn testAbsFloats(comptime T: type) !void {
@@ -259,7 +259,6 @@ fn testAbsFloats(comptime T: type) !void {
test "@abs int vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -326,7 +325,6 @@ fn testAbsIntVectors(comptime len: comptime_int) !void {
test "@abs unsigned int vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -385,7 +383,6 @@ fn testAbsUnsignedIntVectors(comptime len: comptime_int) !void {
test "@abs float vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-1
@@ -527,7 +527,6 @@ test "alignment of zero-bit types is respected" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const S = struct { arr: [0]usize = .{} };
-1
@@ -189,7 +189,6 @@ test "atomicrmw with floats" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
try testAtomicRmwFloat();
try comptime testAtomicRmwFloat();
-1
@@ -1147,7 +1147,6 @@ test "arrays and vectors with big integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23805
-2
@@ -387,7 +387,6 @@ test "bitcast vector to integer and back" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.endian() == .big and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
@@ -530,7 +529,6 @@ test "@bitCast of extern struct containing pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
const S = struct {
-5
@@ -122,7 +122,6 @@ fn vector8() !void {
test "bitReverse vectors u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -142,7 +141,6 @@ fn vector16() !void {
test "bitReverse vectors u16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -162,7 +160,6 @@ fn vector24() !void {
test "bitReverse vectors u24" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -181,8 +178,6 @@ fn vector0() !void {
}
test "bitReverse vectors u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
try comptime vector0();
try vector0();
}
-5
@@ -82,7 +82,6 @@ fn vector8() !void {
test "@byteSwap vectors u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -102,7 +101,6 @@ fn vector16() !void {
test "@byteSwap vectors u16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -122,7 +120,6 @@ fn vector24() !void {
test "@byteSwap vectors u24" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -141,8 +138,6 @@ fn vector0() !void {
}
test "@byteSwap vectors u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
try comptime vector0();
try vector0();
}
-15
@@ -268,7 +268,6 @@ test "type coercion from int to float" {
try check.value(c_longdouble, @as(u1, 0)); // Smoke test - size varies by target.
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
// Basic sanity check that the coercions work for vectors too.
const int_vec: @Vector(2, u24) = @splat(123);
@@ -758,7 +757,6 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
test "@intCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2145,7 +2143,6 @@ test "peer type resolution: array and vector with same child type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
var arr: [2]u32 = .{ 0, 1 };
var vec: @Vector(2, u32) = .{ 2, 3 };
@@ -2167,7 +2164,6 @@ test "peer type resolution: array and vector with same child type" {
test "peer type resolution: array with smaller child type and vector with larger child type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2279,7 +2275,6 @@ test "peer type resolution: three-way resolution combines error set and optional
test "peer type resolution: vector and optional vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -2349,7 +2344,6 @@ test "peer type resolution: array and tuple" {
test "peer type resolution: vector and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var vec: @Vector(3, i32) = .{ 1, 2, 3 };
@@ -2373,7 +2367,6 @@ test "peer type resolution: vector and tuple" {
test "peer type resolution: vector and array and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2816,7 +2809,6 @@ test "cast builtins can wrap result in error union and optional" {
test "@floatCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2857,7 +2849,6 @@ test "@floatCast on vector" {
test "@ptrFromInt on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2882,7 +2873,6 @@ test "@ptrFromInt on vector" {
test "@intFromPtr on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2907,7 +2897,6 @@ test "@intFromPtr on vector" {
test "@floatFromInt on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2927,7 +2916,6 @@ test "@floatFromInt on vector" {
test "@intFromFloat on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2947,7 +2935,6 @@ test "@intFromFloat on vector" {
test "@intFromBool on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -3023,7 +3010,6 @@ test "result information is preserved through many nested structures" {
test "@intCast vector of signed integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -3109,7 +3095,6 @@ test "@intFromFloat boundary cases" {
test "@intFromFloat vector boundary cases" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
fn case(comptime I: type, unshifted_inputs: [2]f32, expected: [2]I) !void {
+98 -1
@@ -1057,7 +1057,6 @@ test "tag name with signed enum values" {
test "tag name with large enum values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const Kdf = enum(u128) {
aes_kdf = 0xea4f8ac1080d74bf60448a629af3d9c9,
@@ -1074,6 +1073,104 @@ test "tag name with large enum values" {
try expect(mem.eql(u8, @tagName(kdf), "argon2id"));
}
test "@tagName with exotic integer enum types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const S = struct {
fn testEnumSigned(comptime T: type) !void {
{
const E1 = enum(T) {
a = -125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
};
var e: E1 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
{
const E2 = enum(T) {
a = -125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
_,
};
var e: E2 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
}
fn testEnumUnsigned(comptime T: type) !void {
{
const E1 = enum(T) {
a = std.math.maxInt(T) - 125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
};
var e: E1 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
{
const E2 = enum(T) {
a = std.math.maxInt(T) - 125,
b = 125,
c = std.math.minInt(T),
d = std.math.maxInt(T),
_,
};
var e: E2 = .a;
try expect(mem.eql(u8, @tagName(e), "a"));
e = .b;
try expect(mem.eql(u8, @tagName(e), "b"));
e = .c;
try expect(mem.eql(u8, @tagName(e), "c"));
e = .d;
try expect(mem.eql(u8, @tagName(e), "d"));
}
}
fn doTheTest() !void {
try testEnumSigned(i33);
try testEnumSigned(i95);
try testEnumSigned(i127);
try testEnumSigned(i257);
try testEnumUnsigned(u33);
try testEnumUnsigned(u95);
try testEnumUnsigned(u127);
try testEnumUnsigned(u257);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "@tagName in callconv(.c) function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-22
@@ -221,9 +221,7 @@ fn testCmp(comptime T: type) !void {
test "vector cmp f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
@@ -236,7 +234,6 @@ test "vector cmp f16" {
test "vector cmp f32" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -249,7 +246,6 @@ test "vector cmp f32" {
test "vector cmp f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -262,7 +258,6 @@ test "vector cmp f64" {
test "vector cmp f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -281,7 +276,6 @@ test "vector cmp f80/c_longdouble" {
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .powerpc64le) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
try testCmpVector(f80);
try comptime testCmpVector(f80);
@@ -477,7 +471,6 @@ fn testSqrt(comptime T: type) !void {
test "@sqrt with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -546,7 +539,6 @@ fn testSin(comptime T: type) !void {
test "@sin with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -615,7 +607,6 @@ fn testCos(comptime T: type) !void {
test "@cos with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -684,7 +675,6 @@ fn testTan(comptime T: type) !void {
test "@tan with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -757,7 +747,6 @@ fn testExp(comptime T: type) !void {
test "@exp with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -825,7 +814,6 @@ fn testExp2(comptime T: type) !void {
test "@exp2 with @vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -893,7 +881,6 @@ fn testLog(comptime T: type) !void {
test "@log with @vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -959,7 +946,6 @@ fn testLog2(comptime T: type) !void {
test "@log2 with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1031,7 +1017,6 @@ fn testLog10(comptime T: type) !void {
test "@log10 with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1151,7 +1136,6 @@ fn testFabs(comptime T: type) !void {
test "@abs with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFabsWithVectors();
@@ -1241,7 +1225,6 @@ fn testFloor(comptime T: type) !void {
test "@floor with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testFloorWithVectors();
@@ -1349,7 +1332,6 @@ fn testCeil(comptime T: type) !void {
test "@ceil with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testCeilWithVectors();
@@ -1439,7 +1421,6 @@ fn testTrunc(comptime T: type) !void {
test "@trunc with vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testTruncWithVectors();
@@ -1460,7 +1441,6 @@ test "neg f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.os.tag == .freebsd) {
// TODO file issue to track this failure
@@ -1486,7 +1466,6 @@ test "neg f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testNeg(f80);
@@ -1741,7 +1720,6 @@ test "comptime calls are only memoized when float arguments are bit-for-bit equa
}
test "result location forwarded through unary float builtins" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
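The hunks above drop the stage2_wasm skips from the vector float builtins (@sqrt, @sin, @cos, @tan, @exp, @exp2, @log, @log2, @log10, @abs, @floor, @ceil, @trunc, negation). A minimal sketch (not part of this commit) of the lanewise pattern these tests follow, assuming `const std = @import("std");` at file scope:
// Hypothetical illustration: a float builtin applied to a vector operand
// acts on every lane independently.
test "sketch: @sqrt over an f32 vector" {
    const v: @Vector(4, f32) = .{ 1.0, 4.0, 9.0, 16.0 };
    const r = @sqrt(v);
    try std.testing.expectEqual(@as(f32, 2.0), r[1]);
    try std.testing.expectEqual(@as(f32, 4.0), r[3]);
}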
-1
View File
@@ -4,7 +4,6 @@ const builtin = @import("builtin");
test "strlit to vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const strlit = "0123456789abcdef0123456789ABCDEF";
-12
View File
@@ -100,7 +100,6 @@ fn testOneClz(comptime T: type, x: T) u32 {
test "@clz vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -184,7 +183,6 @@ fn testCtz128() !void {
test "@ctz vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -584,7 +582,6 @@ test "large integer division" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2260,7 +2257,6 @@ fn testRound(comptime T: type, x: T) !void {
test "vector integer addition" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2322,7 +2318,6 @@ fn testNanEqNan(comptime F: type) !void {
test "vector comparison" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2414,7 +2409,6 @@ test "mod lazy values" {
test "@clz works on both vector and scalar inputs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2489,7 +2483,6 @@ test "runtime int comparison to inf is comptime-known" {
}
test "float divide by zero" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2525,7 +2518,6 @@ test "float divide by zero" {
test "partially-runtime integer vector division would be illegal if vector elements were reordered" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2554,7 +2546,6 @@ test "partially-runtime integer vector division would be illegal if vector eleme
test "float vector division of comptime zero by runtime nan is nan" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2572,7 +2563,6 @@ test "float vector division of comptime zero by runtime nan is nan" {
test "float vector multiplication of comptime zero by runtime nan is nan" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2588,7 +2578,6 @@ test "float vector multiplication of comptime zero by runtime nan is nan" {
}
test "comptime float vector division of zero by nan is nan" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2603,7 +2592,6 @@ test "comptime float vector division of zero by nan is nan" {
}
test "comptime float vector multiplication of zero by nan is nan" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
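A minimal sketch (not part of this commit) of the vector integer behavior covered above, assuming `const std = @import("std");` at file scope:
// Hypothetical illustration: @clz with a vector operand returns a vector of
// per-lane leading-zero counts (u4 lanes for a u8 input vector).
test "sketch: @clz on a u8 vector" {
    const v: @Vector(4, u8) = .{ 1, 2, 128, 0 };
    const r = @clz(v);
    try std.testing.expectEqual(@as(u4, 7), r[0]);
    try std.testing.expectEqual(@as(u4, 0), r[2]);
    try std.testing.expectEqual(@as(u4, 8), r[3]);
}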
-7
View File
@@ -28,7 +28,6 @@ test "@max" {
test "@max on vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -82,7 +81,6 @@ test "@min" {
test "@min for vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -165,7 +163,6 @@ test "@min/@max more than two arguments" {
test "@min/@max more than two vector arguments" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -195,7 +192,6 @@ test "@min/@max notices bounds" {
test "@min/@max notices vector bounds" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -246,7 +242,6 @@ test "@min/@max notices bounds from types" {
test "@min/@max notices bounds from vector types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -287,7 +282,6 @@ test "@min/@max notices bounds from types when comptime-known value is undef" {
test "@min/@max notices bounds from vector types when element of comptime-known vector is undef" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -357,7 +351,6 @@ test "@min/@max with runtime signed and unsigned integers of same size" {
test "@min/@max with runtime vectors of signed and unsigned integers of same size" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-4
View File
@@ -6,7 +6,6 @@ test "memmove and memset intrinsics" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemmoveMemset();
@@ -34,7 +33,6 @@ test "@memmove with both operands single-ptr-to-array, one is null-terminated" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemmoveBothSinglePtrArrayOneIsNullTerminated();
@@ -78,7 +76,6 @@ test "@memmove dest many pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemmoveDestManyPtr();
@@ -121,7 +118,6 @@ test "@memmove slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testMemmoveSlice();
-2
View File
@@ -80,7 +80,6 @@ test "memset with 1-byte struct element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct { x: bool };
var buf: [5]S = undefined;
@@ -93,7 +92,6 @@ test "memset with 1-byte array element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const A = [1]bool;
var buf: [5]A = undefined;
-5
View File
@@ -100,7 +100,6 @@ fn vector16() !void {
test "vector f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -125,7 +124,6 @@ fn vector32() !void {
test "vector f32" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -150,7 +148,6 @@ fn vector64() !void {
test "vector f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -174,7 +171,6 @@ fn vector80() !void {
test "vector f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -200,7 +196,6 @@ fn vector128() !void {
test "vector f128" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-1
View File
@@ -77,7 +77,6 @@ fn testPopCountIntegers() !void {
test "@popCount vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-2
View File
@@ -515,7 +515,6 @@ test "@ptrCast single-item pointer to slice with length 1" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest(comptime T: type, ptr: *const T) !void {
@@ -536,7 +535,6 @@ test "@ptrCast single-item pointer to slice of bytes" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest(comptime T: type, ptr: *const T) !void {
-3
View File
@@ -5,7 +5,6 @@ const expect = std.testing.expect;
test "@select vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -36,7 +35,6 @@ fn selectVectors() !void {
test "@select arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -68,7 +66,6 @@ fn selectArrays() !void {
test "@select compare result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
const S = struct {
-3
View File
@@ -51,7 +51,6 @@ test "@shuffle int" {
test "@shuffle int strange sizes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -133,7 +132,6 @@ fn testShuffle(
test "@shuffle bool 1" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -156,7 +154,6 @@ test "@shuffle bool 1" {
test "@shuffle bool 2" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-1
View File
@@ -112,7 +112,6 @@ test "@truncate > 128 bits" {
test "truncate on vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-1
View File
@@ -2184,7 +2184,6 @@ test "matching captures causes union equivalence" {
test "signed enum tag with negative value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Enum = enum(i8) {
+4 -49
View File
@@ -9,7 +9,6 @@ const expectEqual = std.testing.expectEqual;
test "implicit cast vector to array - bool" {
if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -33,7 +32,6 @@ test "implicit cast vector to array - bool" {
test "implicit cast array to vector - bool" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -57,7 +55,6 @@ test "implicit cast array to vector - bool" {
test "vector wrap operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -81,7 +78,6 @@ test "vector wrap operators" {
test "vector bin compares with mem.eql" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -106,7 +102,6 @@ test "vector bin compares with mem.eql" {
test "vector int operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -129,7 +124,6 @@ test "vector int operators" {
test "vector float operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -173,7 +167,6 @@ test "vector float operators" {
test "vector bit operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -228,7 +221,6 @@ test "array to vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -247,7 +239,6 @@ test "array vector coercion - odd sizes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
@@ -286,7 +277,6 @@ test "array to vector with element type coercion" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -304,7 +294,6 @@ test "array to vector with element type coercion" {
}
test "peer type resolution with coercible element types" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -323,7 +312,6 @@ test "peer type resolution with coercible element types" {
test "tuple to vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -346,7 +334,6 @@ test "tuple to vector" {
test "vector casts of sizes not divisible by 8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -493,7 +480,6 @@ test "initialize vector which is a struct field" {
test "vector comparison operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -539,7 +525,6 @@ test "vector comparison operators" {
test "vector division operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -632,7 +617,6 @@ test "vector division operators" {
test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -671,7 +655,6 @@ test "vector bitwise not operator" {
test "vector boolean not operator" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -700,7 +683,6 @@ test "vector boolean not operator" {
test "vector shift operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -772,7 +754,6 @@ test "vector shift operators" {
test "vector reduce operation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -902,7 +883,6 @@ test "vector reduce operation" {
}
test "vector @reduce comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -919,7 +899,6 @@ test "vector @reduce comptime" {
test "saturating add" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1020,7 +999,6 @@ test "saturating add" {
test "saturating subtraction" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1113,7 +1091,6 @@ test "saturating subtraction" {
test "saturating multiplication" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1141,7 +1118,6 @@ test "saturating multiplication" {
test "saturating shift-left" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1165,7 +1141,6 @@ test "saturating shift-left" {
test "multiplication-assignment operator with an array operand" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1186,7 +1161,6 @@ test "multiplication-assignment operator with an array operand" {
test "@addWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1235,7 +1209,6 @@ test "@addWithOverflow" {
test "@subWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1268,7 +1241,6 @@ test "@subWithOverflow" {
test "@mulWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1290,7 +1262,6 @@ test "@mulWithOverflow" {
test "@shlWithOverflow" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1313,28 +1284,27 @@ test "@shlWithOverflow" {
test "alignment of vectors" {
try expect(@alignOf(@Vector(2, u8)) == switch (builtin.zig_backend) {
else => 2,
.stage2_c => @alignOf(u8),
.stage2_c, .stage2_wasm => @alignOf(u8),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(2, u1)) == switch (builtin.zig_backend) {
else => 1,
.stage2_c => @alignOf(u1),
.stage2_c, .stage2_wasm => @alignOf(u1),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(1, u1)) == switch (builtin.zig_backend) {
else => 1,
.stage2_c => @alignOf(u1),
.stage2_c, .stage2_wasm => @alignOf(u1),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(2, u16)) == switch (builtin.zig_backend) {
else => 4,
.stage2_c => @alignOf(u16),
.stage2_c, .stage2_wasm => @alignOf(u16),
.stage2_x86_64 => 16,
});
}
test "loading the second vector from a slice of vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1351,7 +1321,6 @@ test "loading the second vector from a slice of vectors" {
test "array of vectors is copied" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1375,7 +1344,6 @@ test "array of vectors is copied" {
test "byte vector initialized in inline function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1401,7 +1369,6 @@ test "byte vector initialized in inline function" {
}
test "zero divisor" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1420,7 +1387,6 @@ test "zero divisor" {
}
test "zero multiplicand" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1444,7 +1410,6 @@ test "zero multiplicand" {
}
test "@intCast to u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1469,7 +1434,6 @@ test "modRem with zero divisor" {
test "array operands to shuffle are coerced to vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1486,7 +1450,6 @@ test "load packed vector element" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: @Vector(2, u15) = .{ 1, 4 };
@@ -1498,7 +1461,6 @@ test "store packed vector element" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch == .aarch64_be and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
@@ -1513,7 +1475,6 @@ test "store packed vector element" {
test "store to vector in slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1531,7 +1492,6 @@ test "store to vector in slice" {
test "store vector with memset" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -1574,7 +1534,6 @@ test "addition of vectors represented as strings" {
test "compare vectors with different element types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1586,7 +1545,6 @@ test "compare vectors with different element types" {
test "vector pointer is indexable" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const V = @Vector(2, u32);
@@ -1609,7 +1567,6 @@ test "vector pointer is indexable" {
test "boolean vector with 2 or more booleans" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const vec1 = @Vector(2, bool){ true, true };
@@ -1622,7 +1579,6 @@ test "boolean vector with 2 or more booleans" {
test "bitcast to vector with different child type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1667,7 +1623,6 @@ test "arithmetic on zero-length vectors" {
}
test "@reduce on bool vector" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const a = @Vector(2, bool){ true, true };
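A minimal sketch (not part of this commit) of the bool-vector reduction the test above covers, assuming `const std = @import("std");` at file scope:
// Hypothetical illustration: @reduce folds a bool vector with the given
// operator; .Or is true if any lane is set, .And only if all lanes are.
test "sketch: @reduce on a bool vector" {
    const a = @Vector(2, bool){ true, false };
    try std.testing.expect(@reduce(.Or, a));
    try std.testing.expect(!@reduce(.And, a));
}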
+1 -1
View File
@@ -1473,7 +1473,7 @@ const module_test_targets = blk: {
.os_tag = .wasi,
.abi = .none,
},
.skip_modules = &.{ "compiler-rt", "std" },
.skip_modules = &.{"compiler-rt"},
.use_llvm = false,
.use_lld = false,
},