Revert "Revert "Merge pull request #17657 from Snektron/spirv-recursive-ptrs""

This reverts commit 9f0359d78f in an attempt to
make the tests pass again. The CI failure from that merge should be unrelated
to this commit.
This commit is contained in:
Robin Voetter
2023-10-22 22:17:50 +02:00
committed by Andrew Kelley
parent 6bf554f9a7
commit 4bf27da6a6
21 changed files with 484 additions and 487 deletions
+292 -309
View File
@@ -209,6 +209,10 @@ const DeclGen = struct {
/// See Object.type_map
type_map: *TypeMap,
/// Child types of pointers that are currently in progress of being resolved. If a pointer
/// is already in this map, it's recursive.
wip_pointers: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, CacheRef) = .{},
/// We need to keep track of result ids for block labels, as well as the 'incoming'
/// blocks for a block.
blocks: BlockMap = .{},
@@ -295,6 +299,7 @@ const DeclGen = struct {
pub fn deinit(self: *DeclGen) void {
self.args.deinit(self.gpa);
self.inst_results.deinit(self.gpa);
self.wip_pointers.deinit(self.gpa);
self.blocks.deinit(self.gpa);
self.func.deinit(self.gpa);
self.base_line_stack.deinit(self.gpa);
@@ -358,8 +363,7 @@ const DeclGen = struct {
const mod = self.module;
const ty = mod.intern_pool.typeOf(val).toType();
const ty_ref = try self.resolveType(ty, .indirect);
const ptr_ty_ref = try self.spv.ptrType(ty_ref, storage_class);
const ptr_ty_ref = try self.ptrType(ty, storage_class);
const var_id = self.spv.declPtr(spv_decl_index).result_id;
@@ -582,66 +586,41 @@ const DeclGen = struct {
}
/// Construct a struct at runtime.
/// result_ty_ref must be a struct type.
/// ty must be a struct type.
/// Constituents should be in `indirect` representation (as the elements of a struct should be).
/// Result is in `direct` representation.
fn constructStruct(self: *DeclGen, result_ty_ref: CacheRef, constituents: []const IdRef) !IdRef {
fn constructStruct(self: *DeclGen, ty: Type, types: []const Type, constituents: []const IdRef) !IdRef {
assert(types.len == constituents.len);
// The Khronos LLVM-SPIRV translator crashes because it cannot construct structs whose
// operands are not constant.
// See https://github.com/KhronosGroup/SPIRV-LLVM-Translator/issues/1349
// For now, just initialize the struct by setting the fields manually...
// TODO: Make this OpCompositeConstruct when we can
const ptr_ty_ref = try self.spv.ptrType(result_ty_ref, .Function);
const ptr_composite_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = ptr_composite_id,
.storage_class = .Function,
});
const spv_composite_ty = self.spv.cache.lookup(result_ty_ref).struct_type;
const member_types = spv_composite_ty.member_types;
for (constituents, member_types, 0..) |constitent_id, member_ty_ref, index| {
const ptr_member_ty_ref = try self.spv.ptrType(member_ty_ref, .Function);
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
for (constituents, types, 0..) |constitent_id, member_ty, index| {
const ptr_member_ty_ref = try self.ptrType(member_ty, .Function);
const ptr_id = try self.accessChain(ptr_member_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = constitent_id,
});
}
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
.id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
.pointer = ptr_composite_id,
});
return result_id;
return try self.load(ty, ptr_composite_id, .{});
}
/// Construct an array at runtime.
/// result_ty_ref must be an array type.
/// ty must be an array type.
/// Constituents should be in `indirect` representation (as the elements of an array should be).
/// Result is in `direct` representation.
fn constructArray(self: *DeclGen, result_ty_ref: CacheRef, constituents: []const IdRef) !IdRef {
fn constructArray(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef {
// The Khronos LLVM-SPIRV translator crashes because it cannot construct structs whose
// operands are not constant.
// See https://github.com/KhronosGroup/SPIRV-LLVM-Translator/issues/1349
// For now, just initialize the struct by setting the fields manually...
// TODO: Make this OpCompositeConstruct when we can
// TODO: Make this Function storage type
const ptr_ty_ref = try self.spv.ptrType(result_ty_ref, .Function);
const ptr_composite_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = ptr_composite_id,
.storage_class = .Function,
});
const spv_composite_ty = self.spv.cache.lookup(result_ty_ref).array_type;
const elem_ty_ref = spv_composite_ty.element_type;
const ptr_elem_ty_ref = try self.spv.ptrType(elem_ty_ref, .Function);
const mod = self.module;
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
const ptr_elem_ty_ref = try self.ptrType(ty.elemType2(mod), .Function);
for (constituents, 0..) |constitent_id, index| {
const ptr_id = try self.accessChain(ptr_elem_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
@@ -649,13 +628,8 @@ const DeclGen = struct {
.object = constitent_id,
});
}
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
.id_result_type = self.typeId(result_ty_ref),
.id_result = result_id,
.pointer = ptr_composite_id,
});
return result_id;
return try self.load(ty, ptr_composite_id, .{});
}
/// This function generates a load for a constant in direct (ie, non-memory) representation.
@@ -767,15 +741,18 @@ const DeclGen = struct {
}.toValue();
var constituents: [2]IdRef = undefined;
var types: [2]Type = undefined;
if (eu_layout.error_first) {
constituents[0] = try self.constant(err_ty, err_val, .indirect);
constituents[1] = try self.constant(payload_ty, payload_val, .indirect);
types = .{ err_ty, payload_ty };
} else {
constituents[0] = try self.constant(payload_ty, payload_val, .indirect);
constituents[1] = try self.constant(err_ty, err_val, .indirect);
types = .{ payload_ty, err_ty };
}
return try self.constructStruct(result_ty_ref, &constituents);
return try self.constructStruct(ty, &types, &constituents);
},
.enum_tag => {
const int_val = try val.intFromEnum(ty, mod);
@@ -793,7 +770,11 @@ const DeclGen = struct {
}
const len_id = try self.constant(Type.usize, ptr.len.toValue(), .indirect);
return try self.constructStruct(result_ty_ref, &.{ ptr_id, len_id });
return try self.constructStruct(
ty,
&.{ ptr_ty, Type.usize },
&.{ ptr_id, len_id },
);
},
.opt => {
const payload_ty = ty.optionalChild(mod);
@@ -820,7 +801,11 @@ const DeclGen = struct {
else
try self.spv.constUndef(try self.resolveType(payload_ty, .indirect));
return try self.constructStruct(result_ty_ref, &.{ payload_id, has_pl_id });
return try self.constructStruct(
ty,
&.{ payload_ty, Type.bool },
&.{ payload_id, has_pl_id },
);
},
.aggregate => |aggregate| switch (ip.indexToKey(ty.ip_index)) {
inline .array_type, .vector_type => |array_type, tag| {
@@ -858,7 +843,7 @@ const DeclGen = struct {
else => {},
}
return try self.constructArray(result_ty_ref, constituents);
return try self.constructArray(ty, constituents);
},
.struct_type => {
const struct_type = mod.typeToStruct(ty).?;
@@ -866,6 +851,9 @@ const DeclGen = struct {
return self.todo("packed struct constants", .{});
}
var types = std.ArrayList(Type).init(self.gpa);
defer types.deinit();
var constituents = std.ArrayList(IdRef).init(self.gpa);
defer constituents.deinit();
@@ -881,22 +869,23 @@ const DeclGen = struct {
const field_val = try val.fieldValue(mod, field_index);
const field_id = try self.constant(field_ty, field_val, .indirect);
try types.append(field_ty);
try constituents.append(field_id);
}
return try self.constructStruct(result_ty_ref, constituents.items);
return try self.constructStruct(ty, types.items, constituents.items);
},
.anon_struct_type => unreachable, // TODO
else => unreachable,
},
.un => |un| {
const active_field = ty.unionTagFieldIndex(un.tag.toValue(), mod).?;
const layout = self.unionLayout(ty, active_field);
const payload = if (layout.active_field_size != 0)
try self.constant(layout.active_field_ty, un.val.toValue(), .indirect)
const union_obj = mod.typeToUnion(ty).?;
const field_ty = union_obj.field_types.get(ip)[active_field].toType();
const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
try self.constant(field_ty, un.val.toValue(), .direct)
else
null;
return try self.unionInit(ty, active_field, payload);
},
.memoized_call => unreachable,
@@ -935,8 +924,7 @@ const DeclGen = struct {
// TODO: Can we consolidate this in ptrElemPtr?
const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
const elem_ty_ref = try self.resolveType(elem_ty, .direct);
const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
const elem_ptr_ty_ref = try self.ptrType(elem_ty, spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
if (elem_ptr_ty_ref == result_ty_ref) {
return elem_ptr_id;
@@ -998,8 +986,7 @@ const DeclGen = struct {
};
const decl_id = try self.resolveAnonDecl(decl_val, actual_storage_class);
const decl_ty_ref = try self.resolveType(decl_ty, .indirect);
const decl_ptr_ty_ref = try self.spv.ptrType(decl_ty_ref, final_storage_class);
const decl_ptr_ty_ref = try self.ptrType(decl_ty, final_storage_class);
const ptr_id = switch (final_storage_class) {
.Generic => blk: {
@@ -1055,8 +1042,7 @@ const DeclGen = struct {
const final_storage_class = spvStorageClass(decl.@"addrspace");
const decl_ty_ref = try self.resolveType(decl.ty, .indirect);
const decl_ptr_ty_ref = try self.spv.ptrType(decl_ty_ref, final_storage_class);
const decl_ptr_ty_ref = try self.ptrType(decl.ty, final_storage_class);
const ptr_id = switch (final_storage_class) {
.Generic => blk: {
@@ -1124,29 +1110,52 @@ const DeclGen = struct {
return try self.intType(.unsigned, self.getTarget().ptrBitWidth());
}
/// Generate a union type, optionally with a known field. If the tag alignment is greater
/// than that of the payload, a regular union (non-packed, with both tag and payload), will
/// be generated as follows:
/// If the active field is known:
fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !CacheRef {
const key = .{ child_ty.toIntern(), storage_class };
const entry = try self.wip_pointers.getOrPut(self.gpa, key);
if (entry.found_existing) {
const fwd_ref = entry.value_ptr.*;
try self.spv.cache.recursive_ptrs.put(self.spv.gpa, fwd_ref, {});
return fwd_ref;
}
const fwd_ref = try self.spv.resolve(.{ .fwd_ptr_type = .{
.zig_child_type = child_ty.toIntern(),
.storage_class = storage_class,
} });
entry.value_ptr.* = fwd_ref;
const child_ty_ref = try self.resolveType(child_ty, .indirect);
_ = try self.spv.resolve(.{ .ptr_type = .{
.storage_class = storage_class,
.child_type = child_ty_ref,
.fwd = fwd_ref,
} });
assert(self.wip_pointers.remove(key));
return fwd_ref;
}
/// Generate a union type. Union types are always generated with the
/// most aligned field active. If the tag alignment is greater
/// than that of the payload, a regular union (non-packed, with both tag and
/// payload), will be generated as follows:
/// struct {
/// tag: TagType,
/// payload: ActivePayloadType,
/// payload_padding: [payload_size - @sizeOf(ActivePayloadType)]u8,
/// payload: MostAlignedFieldType,
/// payload_padding: [payload_size - @sizeOf(MostAlignedFieldType)]u8,
/// padding: [padding_size]u8,
/// }
/// If the payload alignment is greater than that of the tag:
/// struct {
/// payload: ActivePayloadType,
/// payload_padding: [payload_size - @sizeOf(ActivePayloadType)]u8,
/// payload: MostAlignedFieldType,
/// payload_padding: [payload_size - @sizeOf(MostAlignedFieldType)]u8,
/// tag: TagType,
/// padding: [padding_size]u8,
/// }
/// If the active payload is unknown, it will default back to the most aligned field. This is
/// to make sure that the overall struct has the correct alignment in SPIR-V.
/// If any of the fields' size is 0, it will be omitted.
/// NOTE: When the active field is set to something other than the most aligned field, the
/// resulting struct will be *underaligned*.
fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef {
fn resolveUnionType(self: *DeclGen, ty: Type) !CacheRef {
const mod = self.module;
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
@@ -1155,17 +1164,13 @@ const DeclGen = struct {
return self.todo("packed union types", .{});
}
const layout = self.unionLayout(ty, maybe_active_field);
if (layout.payload_size == 0) {
const layout = self.unionLayout(ty);
if (!layout.has_payload) {
// No payload, so represent this as just the tag type.
return try self.resolveType(union_obj.enum_tag_ty.toType(), .indirect);
}
// TODO: We need to add the active field to the key, somehow.
if (maybe_active_field == null) {
if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
}
if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
var member_types: [4]CacheRef = undefined;
var member_names: [4]CacheString = undefined;
@@ -1178,10 +1183,10 @@ const DeclGen = struct {
member_names[layout.tag_index] = try self.spv.resolveString("(tag)");
}
if (layout.active_field_size != 0) {
const active_payload_ty_ref = try self.resolveType(layout.active_field_ty, .indirect);
member_types[layout.active_field_index] = active_payload_ty_ref;
member_names[layout.active_field_index] = try self.spv.resolveString("(payload)");
if (layout.payload_size != 0) {
const payload_ty_ref = try self.resolveType(layout.payload_ty, .indirect);
member_types[layout.payload_index] = payload_ty_ref;
member_names[layout.payload_index] = try self.spv.resolveString("(payload)");
}
if (layout.payload_padding_size != 0) {
@@ -1202,9 +1207,7 @@ const DeclGen = struct {
.member_names = member_names[0..layout.total_fields],
} });
if (maybe_active_field == null) {
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
}
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
return ty_ref;
}
@@ -1352,12 +1355,12 @@ const DeclGen = struct {
.Pointer => {
const ptr_info = ty.ptrInfo(mod);
// Note: Don't cache this pointer type, it would mess up the recursive pointer functionality
// in ptrType()!
const storage_class = spvStorageClass(ptr_info.flags.address_space);
const child_ty_ref = try self.resolveType(ptr_info.child.toType(), .indirect);
const ptr_ty_ref = try self.spv.resolve(.{ .ptr_type = .{
.storage_class = storage_class,
.child_type = child_ty_ref,
} });
const ptr_ty_ref = try self.ptrType(ptr_info.child.toType(), storage_class);
if (ptr_info.flags.size != .Slice) {
return ptr_ty_ref;
}
@@ -1472,7 +1475,7 @@ const DeclGen = struct {
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
return ty_ref;
},
.Union => return try self.resolveUnionType(ty, null),
.Union => return try self.resolveUnionType(ty),
.ErrorSet => return try self.intType(.unsigned, 16),
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(mod);
@@ -1586,14 +1589,16 @@ const DeclGen = struct {
}
const UnionLayout = struct {
active_field: u32,
active_field_ty: Type,
payload_size: u32,
/// If false, this union is represented
/// by only an integer of the tag type.
has_payload: bool,
tag_size: u32,
tag_index: u32,
active_field_size: u32,
active_field_index: u32,
/// Note: This is the size of the payload type itself, NOT the size of the ENTIRE payload.
/// Use `has_payload` instead!!
payload_ty: Type,
payload_size: u32,
payload_index: u32,
payload_padding_size: u32,
payload_padding_index: u32,
padding_size: u32,
@@ -1601,23 +1606,19 @@ const DeclGen = struct {
total_fields: u32,
};
fn unionLayout(self: *DeclGen, ty: Type, maybe_active_field: ?usize) UnionLayout {
fn unionLayout(self: *DeclGen, ty: Type) UnionLayout {
const mod = self.module;
const ip = &mod.intern_pool;
const layout = ty.unionGetLayout(self.module);
const union_obj = mod.typeToUnion(ty).?;
const active_field = maybe_active_field orelse layout.most_aligned_field;
const active_field_ty = union_obj.field_types.get(ip)[active_field].toType();
var union_layout = UnionLayout{
.active_field = @intCast(active_field),
.active_field_ty = active_field_ty,
.payload_size = @intCast(layout.payload_size),
.has_payload = layout.payload_size != 0,
.tag_size = @intCast(layout.tag_size),
.tag_index = undefined,
.active_field_size = undefined,
.active_field_index = undefined,
.payload_ty = undefined,
.payload_size = undefined,
.payload_index = undefined,
.payload_padding_size = undefined,
.payload_padding_index = undefined,
.padding_size = @intCast(layout.padding),
@@ -1625,11 +1626,16 @@ const DeclGen = struct {
.total_fields = undefined,
};
union_layout.active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod))
@intCast(active_field_ty.abiSize(mod))
else
0;
union_layout.payload_padding_size = @intCast(layout.payload_size - union_layout.active_field_size);
if (union_layout.has_payload) {
const most_aligned_field = layout.most_aligned_field;
const most_aligned_field_ty = union_obj.field_types.get(ip)[most_aligned_field].toType();
union_layout.payload_ty = most_aligned_field_ty;
union_layout.payload_size = @intCast(most_aligned_field_ty.abiSize(mod));
} else {
union_layout.payload_size = 0;
}
union_layout.payload_padding_size = @intCast(layout.payload_size - union_layout.payload_size);
const tag_first = layout.tag_align.compare(.gte, layout.payload_align);
var field_index: u32 = 0;
@@ -1639,8 +1645,8 @@ const DeclGen = struct {
field_index += 1;
}
if (union_layout.active_field_size != 0) {
union_layout.active_field_index = field_index;
if (union_layout.payload_size != 0) {
union_layout.payload_index = field_index;
field_index += 1;
}
@@ -1684,7 +1690,7 @@ const DeclGen = struct {
/// the name of an error in the text executor.
fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
const anyerror_ty_ref = try self.resolveType(Type.anyerror, .direct);
const ptr_anyerror_ty_ref = try self.spv.ptrType(anyerror_ty_ref, .CrossWorkgroup);
const ptr_anyerror_ty_ref = try self.ptrType(Type.anyerror, .CrossWorkgroup);
const void_ty_ref = try self.resolveType(Type.void, .direct);
const kernel_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
@@ -1719,6 +1725,7 @@ const DeclGen = struct {
.id_result = error_id,
.function = test_id,
});
// Note: Convert to direct not required.
try section.emit(self.spv.gpa, .OpStore, .{
.pointer = p_error_id,
.object = error_id,
@@ -1823,8 +1830,7 @@ const DeclGen = struct {
else => final_storage_class,
};
const ty_ref = try self.resolveType(decl.ty, .indirect);
const ptr_ty_ref = try self.spv.ptrType(ty_ref, actual_storage_class);
const ptr_ty_ref = try self.ptrType(decl.ty, actual_storage_class);
const begin = self.spv.beginGlobal();
try self.spv.globals.section.emit(self.spv.gpa, .OpVariable, .{
@@ -1929,11 +1935,15 @@ const DeclGen = struct {
return try self.convertToDirect(result_ty, result_id);
}
fn load(self: *DeclGen, value_ty: Type, ptr_id: IdRef, is_volatile: bool) !IdRef {
const MemoryOptions = struct {
is_volatile: bool = false,
};
fn load(self: *DeclGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
.Volatile = is_volatile,
.Volatile = options.is_volatile,
};
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
.id_result_type = self.typeId(indirect_value_ty_ref),
@@ -1944,10 +1954,10 @@ const DeclGen = struct {
return try self.convertToDirect(value_ty, result_id);
}
fn store(self: *DeclGen, value_ty: Type, ptr_id: IdRef, value_id: IdRef, is_volatile: bool) !void {
fn store(self: *DeclGen, value_ty: Type, ptr_id: IdRef, value_id: IdRef, options: MemoryOptions) !void {
const indirect_value_id = try self.convertToIndirect(value_ty, value_id);
const access = spec.MemoryAccess.Extended{
.Volatile = is_volatile,
.Volatile = options.is_volatile,
};
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
@@ -2119,9 +2129,7 @@ const DeclGen = struct {
constituent.* = try self.convertToIndirect(child_ty, result_id);
}
const result_ty = try self.resolveType(child_ty, .indirect);
const result_ty_ref = try self.spv.arrayType(vector_len, result_ty);
return try self.constructArray(result_ty_ref, constituents);
return try self.constructArray(ty, constituents);
}
const result_id = self.spv.allocId();
@@ -2182,7 +2190,7 @@ const DeclGen = struct {
const info = try self.arithmeticTypeInfo(result_ty);
// TODO: Use fmin for OpenCL
const cmp_id = try self.cmp(op, result_ty, lhs_id, rhs_id);
const cmp_id = try self.cmp(op, Type.bool, result_ty, lhs_id, rhs_id);
const selection_id = switch (info.class) {
.float => blk: {
// cmp uses OpFOrd. When we have 0 [<>] nan this returns false,
@@ -2317,7 +2325,7 @@ const DeclGen = struct {
constituent.* = try self.arithOp(child_ty, lhs_index_id, rhs_index_id, fop, sop, uop, modular);
}
return self.constructArray(result_ty_ref, constituents);
return self.constructArray(ty, constituents);
}
// Binary operations are generally applicable to both scalar and vector operations
@@ -2473,11 +2481,11 @@ const DeclGen = struct {
// Construct the struct that Zig wants as result.
// The value should already be the correct type.
const ov_id = try self.intFromBool(ov_ty_ref, overflowed_id);
const result_ty_ref = try self.resolveType(result_ty, .direct);
return try self.constructStruct(result_ty_ref, &.{
value_id,
ov_id,
});
return try self.constructStruct(
result_ty,
&.{ operand_ty, ov_ty },
&.{ value_id, ov_id },
);
}
fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -2635,6 +2643,7 @@ const DeclGen = struct {
fn cmp(
self: *DeclGen,
op: std.math.CompareOperator,
result_ty: Type,
ty: Type,
lhs_id: IdRef,
rhs_id: IdRef,
@@ -2675,7 +2684,7 @@ const DeclGen = struct {
if (ty.optionalReprIsPayload(mod)) {
assert(payload_ty.hasRuntimeBitsIgnoreComptime(mod));
assert(!payload_ty.isSlice(mod));
return self.cmp(op, payload_ty, lhs_id, rhs_id);
return self.cmp(op, Type.bool, payload_ty, lhs_id, rhs_id);
}
const lhs_valid_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -2688,7 +2697,7 @@ const DeclGen = struct {
else
try self.convertToDirect(Type.bool, rhs_id);
const valid_cmp_id = try self.cmp(op, Type.bool, lhs_valid_id, rhs_valid_id);
const valid_cmp_id = try self.cmp(op, Type.bool, Type.bool, lhs_valid_id, rhs_valid_id);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
return valid_cmp_id;
}
@@ -2699,7 +2708,7 @@ const DeclGen = struct {
const lhs_pl_id = try self.extractField(payload_ty, lhs_id, 0);
const rhs_pl_id = try self.extractField(payload_ty, rhs_id, 0);
const pl_cmp_id = try self.cmp(op, payload_ty, lhs_pl_id, rhs_pl_id);
const pl_cmp_id = try self.cmp(op, Type.bool, payload_ty, lhs_pl_id, rhs_pl_id);
// op == .eq => lhs_valid == rhs_valid && lhs_pl == rhs_pl
// op == .neq => lhs_valid != rhs_valid || lhs_pl != rhs_pl
@@ -2721,7 +2730,6 @@ const DeclGen = struct {
.Vector => {
const child_ty = ty.childType(mod);
const vector_len = ty.vectorLen(mod);
const bool_ty_ref_indirect = try self.resolveType(Type.bool, .indirect);
var constituents = try self.gpa.alloc(IdRef, vector_len);
defer self.gpa.free(constituents);
@@ -2729,12 +2737,11 @@ const DeclGen = struct {
for (constituents, 0..) |*constituent, i| {
const lhs_index_id = try self.extractField(child_ty, cmp_lhs_id, @intCast(i));
const rhs_index_id = try self.extractField(child_ty, cmp_rhs_id, @intCast(i));
const result_id = try self.cmp(op, child_ty, lhs_index_id, rhs_index_id);
const result_id = try self.cmp(op, Type.bool, child_ty, lhs_index_id, rhs_index_id);
constituent.* = try self.convertToIndirect(Type.bool, result_id);
}
const result_ty_ref = try self.spv.arrayType(vector_len, bool_ty_ref_indirect);
return try self.constructArray(result_ty_ref, constituents);
return try self.constructArray(result_ty, constituents);
},
else => unreachable,
};
@@ -2807,8 +2814,9 @@ const DeclGen = struct {
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const ty = self.typeOf(bin_op.lhs);
const result_ty = self.typeOfIndex(inst);
return try self.cmp(op, ty, lhs_id, rhs_id);
return try self.cmp(op, result_ty, ty, lhs_id, rhs_id);
}
fn airVectorCmp(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -2820,8 +2828,9 @@ const DeclGen = struct {
const rhs_id = try self.resolve(vec_cmp.rhs);
const op = vec_cmp.compareOperator();
const ty = self.typeOf(vec_cmp.lhs);
const result_ty = self.typeOfIndex(inst);
return try self.cmp(op, ty, lhs_id, rhs_id);
return try self.cmp(op, result_ty, ty, lhs_id, rhs_id);
}
fn bitCast(
@@ -2866,23 +2875,17 @@ const DeclGen = struct {
return result_id;
}
const src_ptr_ty_ref = try self.spv.ptrType(src_ty_ref, .Function);
const dst_ptr_ty_ref = try self.spv.ptrType(dst_ty_ref, .Function);
const dst_ptr_ty_ref = try self.ptrType(dst_ty, .Function);
const tmp_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(src_ptr_ty_ref),
.id_result = tmp_id,
.storage_class = .Function,
});
try self.store(src_ty, tmp_id, src_id, false);
const tmp_id = try self.alloc(src_ty, .{ .storage_class = .Function });
try self.store(src_ty, tmp_id, src_id, .{});
const casted_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = self.typeId(dst_ptr_ty_ref),
.id_result = casted_ptr_id,
.operand = tmp_id,
});
return try self.load(dst_ty, casted_ptr_id, false);
return try self.load(dst_ty, casted_ptr_id, .{});
}
fn airBitCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3061,7 +3064,6 @@ const DeclGen = struct {
const elem_ptr_ty = slice_ty.slicePtrFieldType(mod);
const elem_ptr_ty_ref = try self.resolveType(elem_ptr_ty, .direct);
const slice_ty_ref = try self.resolveType(slice_ty, .direct);
const size_ty_ref = try self.sizeType();
const array_ptr_id = try self.resolve(ty_op.operand);
@@ -3074,7 +3076,11 @@ const DeclGen = struct {
// Convert the pointer-to-array to a pointer to the first element.
try self.accessChain(elem_ptr_ty_ref, array_ptr_id, &.{0});
return try self.constructStruct(slice_ty_ref, &.{ elem_ptr_id, len_id });
return try self.constructStruct(
slice_ty,
&.{ elem_ptr_ty, Type.usize },
&.{ elem_ptr_id, len_id },
);
}
fn airSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3084,13 +3090,16 @@ const DeclGen = struct {
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_id = try self.resolve(bin_op.lhs);
const len_id = try self.resolve(bin_op.rhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const slice_ty = self.typeOfIndex(inst);
const slice_ty_ref = try self.resolveType(slice_ty, .direct);
return try self.constructStruct(slice_ty_ref, &.{
ptr_id, // Note: Type should not need to be converted to direct.
len_id, // Note: Type should not need to be converted to direct.
});
// Note: Types should not need to be converted to direct; these types
// don't need to be converted.
return try self.constructStruct(
slice_ty,
&.{ ptr_ty, Type.usize },
&.{ ptr_id, len_id },
);
}
fn airAggregateInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3100,7 +3109,6 @@ const DeclGen = struct {
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const result_ty = self.typeOfIndex(inst);
const result_ty_ref = try self.resolveType(result_ty, .direct);
const len: usize = @intCast(result_ty.arrayLen(mod));
const elements: []const Air.Inst.Ref = @ptrCast(self.air.extra[ty_pl.payload..][0..len]);
@@ -3112,6 +3120,8 @@ const DeclGen = struct {
unreachable; // TODO
}
const types = try self.gpa.alloc(Type, elements.len);
defer self.gpa.free(types);
const constituents = try self.gpa.alloc(IdRef, elements.len);
defer self.gpa.free(constituents);
var index: usize = 0;
@@ -3123,6 +3133,7 @@ const DeclGen = struct {
assert(field_ty.toType().hasRuntimeBits(mod));
const id = try self.resolve(element);
types[index] = field_ty.toType();
constituents[index] = try self.convertToIndirect(field_ty.toType(), id);
index += 1;
}
@@ -3136,6 +3147,7 @@ const DeclGen = struct {
assert(field_ty.hasRuntimeBitsIgnoreComptime(mod));
const id = try self.resolve(element);
types[index] = field_ty;
constituents[index] = try self.convertToIndirect(field_ty, id);
index += 1;
}
@@ -3143,7 +3155,11 @@ const DeclGen = struct {
else => unreachable,
}
return try self.constructStruct(result_ty_ref, constituents[0..index]);
return try self.constructStruct(
result_ty,
types[0..index],
constituents[0..index],
);
},
.Array => {
const array_info = result_ty.arrayInfo(mod);
@@ -3160,7 +3176,7 @@ const DeclGen = struct {
elem_ids[n_elems - 1] = try self.constant(array_info.elem_type, sentinel_val, .indirect);
}
return try self.constructArray(result_ty_ref, elem_ids);
return try self.constructArray(result_ty, elem_ids);
},
else => unreachable,
}
@@ -3245,15 +3261,14 @@ const DeclGen = struct {
const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
const elem_ptr = try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
return try self.load(slice_ty.childType(mod), elem_ptr, slice_ty.isVolatilePtr(mod));
return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) });
}
fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef {
const mod = self.module;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
const elem_ty_ref = try self.resolveType(elem_ty, .direct);
const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
const elem_ptr_ty_ref = try self.ptrType(elem_ty, spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
if (ptr_ty.isSinglePointer(mod)) {
// Pointer-to-array. In this case, the resulting pointer is not of the same type
// as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
@@ -3289,9 +3304,7 @@ const DeclGen = struct {
const mod = self.module;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const array_ty = self.typeOf(bin_op.lhs);
const array_ty_ref = try self.resolveType(array_ty, .direct);
const elem_ty = array_ty.childType(mod);
const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
const array_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
@@ -3299,22 +3312,12 @@ const DeclGen = struct {
// For now, just generate a temporary and use that.
// TODO: This backend probably also should use isByRef from llvm...
const array_ptr_ty_ref = try self.spv.ptrType(array_ty_ref, .Function);
const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, .Function);
const tmp_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(array_ptr_ty_ref),
.id_result = tmp_id,
.storage_class = .Function,
});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = tmp_id,
.object = array_id,
});
const elem_ptr_ty_ref = try self.ptrType(elem_ty, .Function);
const tmp_id = try self.alloc(array_ty, .{ .storage_class = .Function });
try self.store(array_ty, tmp_id, array_id, .{});
const elem_ptr_id = try self.accessChainId(elem_ptr_ty_ref, tmp_id, &.{index_id});
return try self.load(elem_ty, elem_ptr_id, false);
return try self.load(elem_ty, elem_ptr_id, .{});
}
fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3327,7 +3330,7 @@ const DeclGen = struct {
const ptr_id = try self.resolve(bin_op.lhs);
const index_id = try self.resolve(bin_op.rhs);
const elem_ptr_id = try self.ptrElemPtr(ptr_ty, ptr_id, index_id);
return try self.load(elem_ty, elem_ptr_id, ptr_ty.isVolatilePtr(mod));
return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
}
fn airSetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -3335,22 +3338,21 @@ const DeclGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const un_ptr_ty = self.typeOf(bin_op.lhs);
const un_ty = un_ptr_ty.childType(mod);
const layout = self.unionLayout(un_ty, null);
const layout = self.unionLayout(un_ty);
if (layout.tag_size == 0) return;
const tag_ty = un_ty.unionTagTypeSafety(mod).?;
const tag_ty_ref = try self.resolveType(tag_ty, .indirect);
const tag_ptr_ty_ref = try self.spv.ptrType(tag_ty_ref, spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
const tag_ptr_ty_ref = try self.ptrType(tag_ty, spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
const union_ptr_id = try self.resolve(bin_op.lhs);
const new_tag_id = try self.resolve(bin_op.rhs);
if (layout.payload_size == 0) {
try self.store(tag_ty, union_ptr_id, new_tag_id, un_ptr_ty.isVolatilePtr(mod));
if (!layout.has_payload) {
try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
} else {
const ptr_id = try self.accessChain(tag_ptr_ty_ref, union_ptr_id, &.{layout.tag_index});
try self.store(tag_ty, ptr_id, new_tag_id, un_ptr_ty.isVolatilePtr(mod));
try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
}
}
@@ -3361,11 +3363,11 @@ const DeclGen = struct {
const un_ty = self.typeOf(ty_op.operand);
const mod = self.module;
const layout = self.unionLayout(un_ty, null);
const layout = self.unionLayout(un_ty);
if (layout.tag_size == 0) return null;
const union_handle = try self.resolve(ty_op.operand);
if (layout.payload_size == 0) return union_handle;
if (!layout.has_payload) return union_handle;
const tag_ty = un_ty.unionTagTypeSafety(mod).?;
return try self.extractField(tag_ty, union_handle, layout.tag_index);
@@ -3378,8 +3380,8 @@ const DeclGen = struct {
payload: ?IdRef,
) !IdRef {
// To initialize a union, generate a temporary variable with the
// type that has the right field active, then pointer-cast and store
// the active field, and finally load and return the entire union.
// union type, then get the field pointer and pointer-cast it to the
// right type to store it. Finally load the entire union.
const mod = self.module;
const ip = &mod.intern_pool;
@@ -3390,7 +3392,7 @@ const DeclGen = struct {
}
const maybe_tag_ty = ty.unionTagTypeSafety(mod);
const layout = self.unionLayout(ty, active_field);
const layout = self.unionLayout(ty);
const tag_int = if (layout.tag_size != 0) blk: {
const tag_ty = maybe_tag_ty.?;
@@ -3401,42 +3403,34 @@ const DeclGen = struct {
break :blk tag_int_val.toUnsignedInt(mod);
} else 0;
if (layout.payload_size == 0) {
if (!layout.has_payload) {
const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
return try self.constInt(tag_ty_ref, tag_int);
}
const un_active_ty_ref = try self.resolveUnionType(ty, active_field);
const un_active_ptr_ty_ref = try self.spv.ptrType(un_active_ty_ref, .Function);
const un_general_ty_ref = try self.resolveType(ty, .direct);
const un_general_ptr_ty_ref = try self.spv.ptrType(un_general_ty_ref, .Function);
const tmp_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(un_active_ptr_ty_ref),
.id_result = tmp_id,
.storage_class = .Function,
});
const tmp_id = try self.alloc(ty, .{ .storage_class = .Function });
if (layout.tag_size != 0) {
const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
const tag_ptr_ty_ref = try self.spv.ptrType(tag_ty_ref, .Function);
const tag_ptr_ty_ref = try self.ptrType(maybe_tag_ty.?, .Function);
const ptr_id = try self.accessChain(tag_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
const tag_id = try self.constInt(tag_ty_ref, tag_int);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = tag_id,
});
try self.store(maybe_tag_ty.?, ptr_id, tag_id, .{});
}
if (layout.active_field_size != 0) {
const active_field_ty_ref = try self.resolveType(layout.active_field_ty, .indirect);
const active_field_ptr_ty_ref = try self.spv.ptrType(active_field_ty_ref, .Function);
const ptr_id = try self.accessChain(active_field_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.active_field_index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = payload.?,
const payload_ty = union_ty.field_types.get(ip)[active_field].toType();
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
const active_pl_ptr_ty_ref = try self.ptrType(payload_ty, .Function);
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = self.typeId(active_pl_ptr_ty_ref),
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
try self.store(payload_ty, active_pl_ptr_id, payload.?, .{});
} else {
assert(payload == null);
}
@@ -3444,34 +3438,21 @@ const DeclGen = struct {
// Just leave the padding fields uninitialized...
// TODO: Or should we initialize them with undef explicitly?
// Now cast the pointer and load it as the 'generic' union type.
const casted_var_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = self.typeId(un_general_ptr_ty_ref),
.id_result = casted_var_id,
.operand = tmp_id,
});
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
.id_result_type = self.typeId(un_general_ty_ref),
.id_result = result_id,
.pointer = casted_var_id,
});
return result_id;
return try self.load(ty, tmp_id, .{});
}
fn airUnionInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
if (self.liveness.isUnused(inst)) return null;
const mod = self.module;
const ip = &mod.intern_pool;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
const ty = self.typeOfIndex(inst);
const layout = self.unionLayout(ty, extra.field_index);
const payload = if (layout.active_field_size != 0)
const union_obj = mod.typeToUnion(ty).?;
const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
const payload = if (field_ty.hasRuntimeBitsIgnoreComptime(mod))
try self.resolve(extra.init)
else
null;
@@ -3500,30 +3481,24 @@ const DeclGen = struct {
.Union => switch (object_ty.containerLayout(mod)) {
.Packed => unreachable, // TODO
else => {
// Store, pointer-cast, load
const un_general_ty_ref = try self.resolveType(object_ty, .indirect);
const un_general_ptr_ty_ref = try self.spv.ptrType(un_general_ty_ref, .Function);
const un_active_ty_ref = try self.resolveUnionType(object_ty, field_index);
const un_active_ptr_ty_ref = try self.spv.ptrType(un_active_ty_ref, .Function);
const field_ty_ref = try self.resolveType(field_ty, .indirect);
const field_ptr_ty_ref = try self.spv.ptrType(field_ty_ref, .Function);
// Store, ptr-elem-ptr, pointer-cast, load
const layout = self.unionLayout(object_ty);
assert(layout.has_payload);
const tmp_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(un_general_ptr_ty_ref),
.id_result = tmp_id,
.storage_class = .Function,
});
try self.store(object_ty, tmp_id, object_id, false);
const casted_tmp_id = self.spv.allocId();
const tmp_id = try self.alloc(object_ty, .{ .storage_class = .Function });
try self.store(object_ty, tmp_id, object_id, .{});
const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
const active_pl_ptr_ty_ref = try self.ptrType(field_ty, .Function);
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = self.typeId(un_active_ptr_ty_ref),
.id_result = casted_tmp_id,
.operand = tmp_id,
.id_result_type = self.typeId(active_pl_ptr_ty_ref),
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
const layout = self.unionLayout(object_ty, field_index);
const field_ptr_id = try self.accessChain(field_ptr_ty_ref, casted_tmp_id, &.{layout.active_field_index});
return try self.load(field_ty, field_ptr_id, false);
return try self.load(field_ty, active_pl_ptr_id, .{});
},
},
else => unreachable,
@@ -3582,18 +3557,24 @@ const DeclGen = struct {
.Union => switch (object_ty.containerLayout(mod)) {
.Packed => unreachable, // TODO
else => {
const storage_class = spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
const un_active_ty_ref = try self.resolveUnionType(object_ty, field_index);
const un_active_ptr_ty_ref = try self.spv.ptrType(un_active_ty_ref, storage_class);
const layout = self.unionLayout(object_ty);
if (!layout.has_payload) {
// Asked to get a pointer to a zero-sized field. Just lower this
// to undefined, there is no reason to make it be a valid pointer.
return try self.spv.constUndef(result_ty_ref);
}
const casted_id = self.spv.allocId();
const storage_class = spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, storage_class);
const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, object_ptr, &.{layout.payload_index});
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
.id_result_type = self.typeId(un_active_ptr_ty_ref),
.id_result = casted_id,
.operand = object_ptr,
.id_result_type = self.typeId(result_ty_ref),
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
const layout = self.unionLayout(object_ty, field_index);
return try self.accessChain(result_ty_ref, casted_id, &.{layout.active_field_index});
return active_pl_ptr_id;
},
},
else => unreachable,
@@ -3609,23 +3590,13 @@ const DeclGen = struct {
return try self.structFieldPtr(result_ptr_ty, struct_ptr_ty, struct_ptr, field_index);
}
/// We cannot use an OpVariable directly in an OpSpecConstantOp, but we can
/// after we insert a dummy AccessChain...
/// TODO: Get rid of this
fn makePointerConstant(
self: *DeclGen,
section: *SpvSection,
ptr_ty_ref: CacheRef,
ptr_id: IdRef,
) !IdRef {
const result_id = self.spv.allocId();
try section.emitSpecConstantOp(self.spv.gpa, .OpInBoundsAccessChain, .{
.id_result_type = self.typeId(ptr_ty_ref),
.id_result = result_id,
.base = ptr_id,
});
return result_id;
}
const AllocOptions = struct {
initializer: ?IdRef = null,
/// The final storage class of the pointer. This may be either `.Generic` or `.Function`.
/// In either case, the local is allocated in the `.Function` storage class, and optionally
/// cast back to `.Generic`.
storage_class: StorageClass = .Generic,
};
// Allocate a function-local variable, with possible initializer.
// This function returns a pointer to a variable of type `ty_ref`,
@@ -3633,30 +3604,36 @@ const DeclGen = struct {
// placed in the Function address space.
fn alloc(
self: *DeclGen,
ty_ref: CacheRef,
initializer: ?IdRef,
ty: Type,
options: AllocOptions,
) !IdRef {
const fn_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Function);
const general_ptr_ty_ref = try self.spv.ptrType(ty_ref, .Generic);
const ptr_fn_ty_ref = try self.ptrType(ty, .Function);
// SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
// directly generate them into func.prologue instead of the body.
const var_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
.id_result_type = self.typeId(fn_ptr_ty_ref),
.id_result_type = self.typeId(ptr_fn_ty_ref),
.id_result = var_id,
.storage_class = .Function,
.initializer = initializer,
.initializer = options.initializer,
});
// Convert to a generic pointer
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
.id_result_type = self.typeId(general_ptr_ty_ref),
.id_result = result_id,
.pointer = var_id,
});
return result_id;
switch (options.storage_class) {
.Generic => {
const ptr_gn_ty_ref = try self.ptrType(ty, .Generic);
// Convert to a generic pointer
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpPtrCastToGeneric, .{
.id_result_type = self.typeId(ptr_gn_ty_ref),
.id_result = result_id,
.pointer = var_id,
});
return result_id;
},
.Function => return var_id,
else => unreachable,
}
}
fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3665,8 +3642,7 @@ const DeclGen = struct {
const ptr_ty = self.typeOfIndex(inst);
assert(ptr_ty.ptrAddressSpace(mod) == .generic);
const child_ty = ptr_ty.childType(mod);
const child_ty_ref = try self.resolveType(child_ty, .indirect);
return try self.alloc(child_ty_ref, null);
return try self.alloc(child_ty, .{});
}
fn airArg(self: *DeclGen) IdRef {
@@ -3781,7 +3757,7 @@ const DeclGen = struct {
const operand = try self.resolve(ty_op.operand);
if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null;
return try self.load(elem_ty, operand, ptr_ty.isVolatilePtr(mod));
return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
}
fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -3791,7 +3767,7 @@ const DeclGen = struct {
const ptr = try self.resolve(bin_op.lhs);
const value = try self.resolve(bin_op.rhs);
try self.store(elem_ty, ptr, value, ptr_ty.isVolatilePtr(self.module));
try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(self.module) });
}
fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -3855,7 +3831,7 @@ const DeclGen = struct {
}
const ptr = try self.resolve(un_op);
const value = try self.load(ret_ty, ptr, ptr_ty.isVolatilePtr(mod));
const value = try self.load(ret_ty, ptr, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) });
try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{
.value = value,
});
@@ -3981,8 +3957,11 @@ const DeclGen = struct {
members[eu_layout.errorFieldIndex()] = operand_id;
members[eu_layout.payloadFieldIndex()] = try self.spv.constUndef(payload_ty_ref);
const err_union_ty_ref = try self.resolveType(err_union_ty, .direct);
return try self.constructStruct(err_union_ty_ref, &members);
var types: [2]Type = undefined;
types[eu_layout.errorFieldIndex()] = Type.anyerror;
types[eu_layout.payloadFieldIndex()] = payload_ty;
return try self.constructStruct(err_union_ty, &types, &members);
}
fn airWrapErrUnionPayload(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -4003,8 +3982,11 @@ const DeclGen = struct {
members[eu_layout.errorFieldIndex()] = try self.constInt(err_ty_ref, 0);
members[eu_layout.payloadFieldIndex()] = try self.convertToIndirect(payload_ty, operand_id);
const err_union_ty_ref = try self.resolveType(err_union_ty, .direct);
return try self.constructStruct(err_union_ty_ref, &members);
var types: [2]Type = undefined;
types[eu_layout.errorFieldIndex()] = Type.anyerror;
types[eu_layout.payloadFieldIndex()] = payload_ty;
return try self.constructStruct(err_union_ty, &types, &members);
}
fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef {
@@ -4038,7 +4020,7 @@ const DeclGen = struct {
.is_null => .eq,
.is_non_null => .neq,
};
return try self.cmp(op, ptr_ty, ptr_id, null_id);
return try self.cmp(op, Type.bool, ptr_ty, ptr_id, null_id);
}
const is_non_null_id = if (payload_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -4136,10 +4118,10 @@ const DeclGen = struct {
return operand_id;
}
const optional_ty_ref = try self.resolveType(optional_ty, .direct);
const payload_id = try self.convertToIndirect(payload_ty, operand_id);
const members = [_]IdRef{ payload_id, try self.constBool(true, .indirect) };
return try self.constructStruct(optional_ty_ref, &members);
const types = [_]Type{ payload_ty, Type.bool };
return try self.constructStruct(optional_ty, &types, &members);
}
fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -4421,6 +4403,7 @@ const DeclGen = struct {
}
// TODO: Multiple results
// TODO: Check that the output type from assembly is the same as the type actually expected by Zig.
}
return null;
+10 -4
View File
@@ -304,10 +304,16 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
// and so some consideration must be taken when entering this in the type system.
return self.todo("process OpTypeArray", .{});
},
.OpTypePointer => try self.spv.ptrType(
try self.resolveTypeRef(operands[2].ref_id),
@as(spec.StorageClass, @enumFromInt(operands[1].value)),
),
.OpTypePointer => blk: {
break :blk try self.spv.resolve(.{
.ptr_type = .{
.storage_class = @enumFromInt(operands[1].value),
.child_type = try self.resolveTypeRef(operands[2].ref_id),
// TODO: This should be a proper reference resolved via OpTypeForwardPointer
.fwd = @enumFromInt(std.math.maxInt(u32)),
},
});
},
.OpTypeFunction => blk: {
const param_operands = operands[2..];
const param_types = try self.spv.gpa.alloc(CacheRef, param_operands.len);
+182 -124
View File
@@ -22,6 +22,8 @@ const Opcode = spec.Opcode;
const IdResult = spec.IdResult;
const StorageClass = spec.StorageClass;
const InternPool = @import("../../InternPool.zig");
const Self = @This();
map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
@@ -31,6 +33,8 @@ extra: std.ArrayListUnmanaged(u32) = .{},
string_bytes: std.ArrayListUnmanaged(u8) = .{},
strings: std.AutoArrayHashMapUnmanaged(void, u32) = .{},
recursive_ptrs: std.AutoHashMapUnmanaged(Ref, void) = .{},
const Item = struct {
tag: Tag,
/// The result-id that this item uses.
@@ -62,18 +66,21 @@ const Tag = enum {
/// Function (proto)type
/// data is payload to FunctionType
type_function,
/// Pointer type in the CrossWorkgroup storage class
/// data is child type
type_ptr_generic,
/// Pointer type in the CrossWorkgroup storage class
/// data is child type
type_ptr_crosswgp,
/// Pointer type in the Function storage class
/// data is child type
type_ptr_function,
// /// Pointer type in the CrossWorkgroup storage class
// /// data is child type
// type_ptr_generic,
// /// Pointer type in the CrossWorkgroup storage class
// /// data is child type
// type_ptr_crosswgp,
// /// Pointer type in the Function storage class
// /// data is child type
// type_ptr_function,
/// Simple pointer type that does not have any decorations.
/// data is payload to SimplePointerType
type_ptr_simple,
/// A forward declaration for a pointer.
/// data is ForwardPointerType
type_fwd_ptr,
/// Simple structure type that does not have any decorations.
/// data is payload to SimpleStructType
type_struct_simple,
@@ -142,6 +149,12 @@ const Tag = enum {
const SimplePointerType = struct {
storage_class: StorageClass,
child_type: Ref,
fwd: Ref,
};
const ForwardPointerType = struct {
storage_class: StorageClass,
zig_child_type: InternPool.Index,
};
/// Trailing:
@@ -163,14 +176,14 @@ const Tag = enum {
fn encode(value: f64) Float64 {
const bits = @as(u64, @bitCast(value));
return .{
.low = @as(u32, @truncate(bits)),
.high = @as(u32, @truncate(bits >> 32)),
.low = @truncate(bits),
.high = @truncate(bits >> 32),
};
}
fn decode(self: Float64) f64 {
const bits = @as(u64, self.low) | (@as(u64, self.high) << 32);
return @as(f64, @bitCast(bits));
return @bitCast(bits);
}
};
@@ -192,8 +205,8 @@ const Tag = enum {
fn encode(ty: Ref, value: u64) Int64 {
return .{
.ty = ty,
.low = @as(u32, @truncate(value)),
.high = @as(u32, @truncate(value >> 32)),
.low = @truncate(value),
.high = @truncate(value >> 32),
};
}
@@ -210,8 +223,8 @@ const Tag = enum {
fn encode(ty: Ref, value: i64) Int64 {
return .{
.ty = ty,
.low = @as(u32, @truncate(@as(u64, @bitCast(value)))),
.high = @as(u32, @truncate(@as(u64, @bitCast(value)) >> 32)),
.low = @truncate(@as(u64, @bitCast(value))),
.high = @truncate(@as(u64, @bitCast(value)) >> 32),
};
}
@@ -237,6 +250,7 @@ pub const Key = union(enum) {
array_type: ArrayType,
function_type: FunctionType,
ptr_type: PointerType,
fwd_ptr_type: ForwardPointerType,
struct_type: StructType,
opaque_type: OpaqueType,
@@ -273,12 +287,18 @@ pub const Key = union(enum) {
pub const PointerType = struct {
storage_class: StorageClass,
child_type: Ref,
fwd: Ref,
// TODO: Decorations:
// - Alignment
// - ArrayStride,
// - MaxByteOffset,
};
pub const ForwardPointerType = struct {
zig_child_type: InternPool.Index,
storage_class: StorageClass,
};
pub const StructType = struct {
// TODO: Decorations.
/// The name of the structure. Can be `.none`.
@@ -313,21 +333,21 @@ pub const Key = union(enum) {
/// Turns this value into the corresponding 32-bit literal, 2s complement signed.
fn toBits32(self: Int) u32 {
return switch (self.value) {
.uint64 => |val| @as(u32, @intCast(val)),
.int64 => |val| if (val < 0) @as(u32, @bitCast(@as(i32, @intCast(val)))) else @as(u32, @intCast(val)),
.uint64 => |val| @intCast(val),
.int64 => |val| if (val < 0) @bitCast(@as(i32, @intCast(val))) else @intCast(val),
};
}
fn toBits64(self: Int) u64 {
return switch (self.value) {
.uint64 => |val| val,
.int64 => |val| @as(u64, @bitCast(val)),
.int64 => |val| @bitCast(val),
};
}
fn to(self: Int, comptime T: type) T {
return switch (self.value) {
inline else => |val| @as(T, @intCast(val)),
inline else => |val| @intCast(val),
};
}
};
@@ -387,7 +407,7 @@ pub const Key = union(enum) {
},
inline else => |key| std.hash.autoHash(&hasher, key),
}
return @as(u32, @truncate(hasher.final()));
return @truncate(hasher.final());
}
fn eql(a: Key, b: Key) bool {
@@ -419,7 +439,7 @@ pub const Key = union(enum) {
pub fn eql(ctx: @This(), a: Key, b_void: void, b_index: usize) bool {
_ = b_void;
return ctx.self.lookup(@as(Ref, @enumFromInt(b_index))).eql(a);
return ctx.self.lookup(@enumFromInt(b_index)).eql(a);
}
pub fn hash(ctx: @This(), a: Key) u32 {
@@ -450,6 +470,7 @@ pub fn deinit(self: *Self, spv: *const Module) void {
self.extra.deinit(spv.gpa);
self.string_bytes.deinit(spv.gpa);
self.strings.deinit(spv.gpa);
self.recursive_ptrs.deinit(spv.gpa);
}
/// Actually materialize the database into spir-v instructions.
@@ -460,7 +481,7 @@ pub fn materialize(self: *const Self, spv: *Module) !Section {
var section = Section{};
errdefer section.deinit(spv.gpa);
for (self.items.items(.result_id), 0..) |result_id, index| {
try self.emit(spv, result_id, @as(Ref, @enumFromInt(index)), &section);
try self.emit(spv, result_id, @enumFromInt(index), &section);
}
return section;
}
@@ -538,6 +559,15 @@ fn emit(
});
// TODO: Decorations?
},
.fwd_ptr_type => |fwd| {
// Only emit the OpTypeForwardPointer if its actually required.
if (self.recursive_ptrs.contains(ref)) {
try section.emit(spv.gpa, .OpTypeForwardPointer, .{
.pointer_type = result_id,
.storage_class = fwd.storage_class,
});
}
},
.struct_type => |struct_type| {
try section.emitRaw(spv.gpa, .OpTypeStruct, 1 + struct_type.member_types.len);
section.writeOperand(IdResult, result_id);
@@ -549,7 +579,7 @@ fn emit(
}
for (struct_type.memberNames(), 0..) |member_name, i| {
if (self.getString(member_name)) |name| {
try spv.memberDebugName(result_id, @as(u32, @intCast(i)), name);
try spv.memberDebugName(result_id, @intCast(i), name);
}
}
// TODO: Decorations?
@@ -625,13 +655,12 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
const adapter: Key.Adapter = .{ .self = self };
const entry = try self.map.getOrPutAdapted(spv.gpa, key, adapter);
if (entry.found_existing) {
return @as(Ref, @enumFromInt(entry.index));
return @enumFromInt(entry.index);
}
const result_id = spv.allocId();
const item: Item = switch (key) {
inline .void_type, .bool_type => .{
.tag = .type_simple,
.result_id = result_id,
.result_id = spv.allocId(),
.data = @intFromEnum(key.toSimpleType()),
},
.int_type => |int| blk: {
@@ -641,87 +670,104 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
};
break :blk .{
.tag = t,
.result_id = result_id,
.result_id = spv.allocId(),
.data = int.bits,
};
},
.float_type => |float| .{
.tag = .type_float,
.result_id = result_id,
.result_id = spv.allocId(),
.data = float.bits,
},
.vector_type => |vector| .{
.tag = .type_vector,
.result_id = result_id,
.result_id = spv.allocId(),
.data = try self.addExtra(spv, vector),
},
.array_type => |array| .{
.tag = .type_array,
.result_id = result_id,
.result_id = spv.allocId(),
.data = try self.addExtra(spv, array),
},
.function_type => |function| blk: {
const extra = try self.addExtra(spv, Tag.FunctionType{
.param_len = @as(u32, @intCast(function.parameters.len)),
.param_len = @intCast(function.parameters.len),
.return_type = function.return_type,
});
try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(function.parameters)));
try self.extra.appendSlice(spv.gpa, @ptrCast(function.parameters));
break :blk .{
.tag = .type_function,
.result_id = result_id,
.result_id = spv.allocId(),
.data = extra,
};
},
.ptr_type => |ptr| switch (ptr.storage_class) {
.Generic => Item{
.tag = .type_ptr_generic,
.result_id = result_id,
.data = @intFromEnum(ptr.child_type),
},
.CrossWorkgroup => Item{
.tag = .type_ptr_crosswgp,
.result_id = result_id,
.data = @intFromEnum(ptr.child_type),
},
.Function => Item{
.tag = .type_ptr_function,
.result_id = result_id,
.data = @intFromEnum(ptr.child_type),
},
else => |storage_class| Item{
.tag = .type_ptr_simple,
.result_id = result_id,
.data = try self.addExtra(spv, Tag.SimplePointerType{
.storage_class = storage_class,
.child_type = ptr.child_type,
}),
},
// .ptr_type => |ptr| switch (ptr.storage_class) {
// .Generic => Item{
// .tag = .type_ptr_generic,
// .result_id = spv.allocId(),
// .data = @intFromEnum(ptr.child_type),
// },
// .CrossWorkgroup => Item{
// .tag = .type_ptr_crosswgp,
// .result_id = spv.allocId(),
// .data = @intFromEnum(ptr.child_type),
// },
// .Function => Item{
// .tag = .type_ptr_function,
// .result_id = spv.allocId(),
// .data = @intFromEnum(ptr.child_type),
// },
// else => |storage_class| Item{
// .tag = .type_ptr_simple,
// .result_id = spv.allocId(),
// .data = try self.addExtra(spv, Tag.SimplePointerType{
// .storage_class = storage_class,
// .child_type = ptr.child_type,
// }),
// },
// },
.ptr_type => |ptr| Item{
.tag = .type_ptr_simple,
.result_id = self.resultId(ptr.fwd),
.data = try self.addExtra(spv, Tag.SimplePointerType{
.storage_class = ptr.storage_class,
.child_type = ptr.child_type,
.fwd = ptr.fwd,
}),
},
.fwd_ptr_type => |fwd| Item{
.tag = .type_fwd_ptr,
.result_id = spv.allocId(),
.data = try self.addExtra(spv, Tag.ForwardPointerType{
.zig_child_type = fwd.zig_child_type,
.storage_class = fwd.storage_class,
}),
},
.struct_type => |struct_type| blk: {
const extra = try self.addExtra(spv, Tag.SimpleStructType{
.name = struct_type.name,
.members_len = @as(u32, @intCast(struct_type.member_types.len)),
.members_len = @intCast(struct_type.member_types.len),
});
try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(struct_type.member_types)));
try self.extra.appendSlice(spv.gpa, @ptrCast(struct_type.member_types));
if (struct_type.member_names) |member_names| {
try self.extra.appendSlice(spv.gpa, @as([]const u32, @ptrCast(member_names)));
try self.extra.appendSlice(spv.gpa, @ptrCast(member_names));
break :blk Item{
.tag = .type_struct_simple_with_member_names,
.result_id = result_id,
.result_id = spv.allocId(),
.data = extra,
};
} else {
break :blk Item{
.tag = .type_struct_simple,
.result_id = result_id,
.result_id = spv.allocId(),
.data = extra,
};
}
},
.opaque_type => |opaque_type| Item{
.tag = .type_opaque,
.result_id = result_id,
.result_id = spv.allocId(),
.data = @intFromEnum(opaque_type.name),
},
.int => |int| blk: {
@@ -729,13 +775,13 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
if (int_type.signedness == .unsigned and int_type.bits == 8) {
break :blk .{
.tag = .uint8,
.result_id = result_id,
.result_id = spv.allocId(),
.data = int.to(u8),
};
} else if (int_type.signedness == .unsigned and int_type.bits == 32) {
break :blk .{
.tag = .uint32,
.result_id = result_id,
.result_id = spv.allocId(),
.data = int.to(u32),
};
}
@@ -745,32 +791,32 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
if (val >= 0 and val <= std.math.maxInt(u32)) {
break :blk .{
.tag = .uint_small,
.result_id = result_id,
.result_id = spv.allocId(),
.data = try self.addExtra(spv, Tag.UInt32{
.ty = int.ty,
.value = @as(u32, @intCast(val)),
.value = @intCast(val),
}),
};
} else if (val >= std.math.minInt(i32) and val <= std.math.maxInt(i32)) {
break :blk .{
.tag = .int_small,
.result_id = result_id,
.result_id = spv.allocId(),
.data = try self.addExtra(spv, Tag.Int32{
.ty = int.ty,
.value = @as(i32, @intCast(val)),
.value = @intCast(val),
}),
};
} else if (val < 0) {
break :blk .{
.tag = .int_large,
.result_id = result_id,
.data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @as(i64, @intCast(val)))),
.result_id = spv.allocId(),
.data = try self.addExtra(spv, Tag.Int64.encode(int.ty, @intCast(val))),
};
} else {
break :blk .{
.tag = .uint_large,
.result_id = result_id,
.data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @as(u64, @intCast(val)))),
.result_id = spv.allocId(),
.data = try self.addExtra(spv, Tag.UInt64.encode(int.ty, @intCast(val))),
};
}
},
@@ -779,29 +825,29 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
.float => |float| switch (self.lookup(float.ty).float_type.bits) {
16 => .{
.tag = .float16,
.result_id = result_id,
.result_id = spv.allocId(),
.data = @as(u16, @bitCast(float.value.float16)),
},
32 => .{
.tag = .float32,
.result_id = result_id,
.result_id = spv.allocId(),
.data = @as(u32, @bitCast(float.value.float32)),
},
64 => .{
.tag = .float64,
.result_id = result_id,
.result_id = spv.allocId(),
.data = try self.addExtra(spv, Tag.Float64.encode(float.value.float64)),
},
else => unreachable,
},
.undef => |undef| .{
.tag = .undef,
.result_id = result_id,
.result_id = spv.allocId(),
.data = @intFromEnum(undef.ty),
},
.null => |null_info| .{
.tag = .null,
.result_id = result_id,
.result_id = spv.allocId(),
.data = @intFromEnum(null_info.ty),
},
.bool => |bool_info| .{
@@ -809,13 +855,13 @@ pub fn resolve(self: *Self, spv: *Module, key: Key) !Ref {
true => Tag.bool_true,
false => Tag.bool_false,
},
.result_id = result_id,
.result_id = spv.allocId(),
.data = @intFromEnum(bool_info.ty),
},
};
try self.items.append(spv.gpa, item);
return @as(Ref, @enumFromInt(entry.index));
return @enumFromInt(entry.index);
}
/// Turn a Ref back into a Key.
@@ -830,14 +876,14 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
},
.type_int_signed => .{ .int_type = .{
.signedness = .signed,
.bits = @as(u16, @intCast(data)),
.bits = @intCast(data),
} },
.type_int_unsigned => .{ .int_type = .{
.signedness = .unsigned,
.bits = @as(u16, @intCast(data)),
.bits = @intCast(data),
} },
.type_float => .{ .float_type = .{
.bits = @as(u16, @intCast(data)),
.bits = @intCast(data),
} },
.type_vector => .{ .vector_type = self.extraData(Tag.VectorType, data) },
.type_array => .{ .array_type = self.extraData(Tag.ArrayType, data) },
@@ -846,40 +892,50 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
return .{
.function_type = .{
.return_type = payload.data.return_type,
.parameters = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.param_len])),
.parameters = @ptrCast(self.extra.items[payload.trail..][0..payload.data.param_len]),
},
};
},
.type_ptr_generic => .{
.ptr_type = .{
.storage_class = .Generic,
.child_type = @as(Ref, @enumFromInt(data)),
},
},
.type_ptr_crosswgp => .{
.ptr_type = .{
.storage_class = .CrossWorkgroup,
.child_type = @as(Ref, @enumFromInt(data)),
},
},
.type_ptr_function => .{
.ptr_type = .{
.storage_class = .Function,
.child_type = @as(Ref, @enumFromInt(data)),
},
},
// .type_ptr_generic => .{
// .ptr_type = .{
// .storage_class = .Generic,
// .child_type = @enumFromInt(data),
// },
// },
// .type_ptr_crosswgp => .{
// .ptr_type = .{
// .storage_class = .CrossWorkgroup,
// .child_type = @enumFromInt(data),
// },
// },
// .type_ptr_function => .{
// .ptr_type = .{
// .storage_class = .Function,
// .child_type = @enumFromInt(data),
// },
// },
.type_ptr_simple => {
const payload = self.extraData(Tag.SimplePointerType, data);
return .{
.ptr_type = .{
.storage_class = payload.storage_class,
.child_type = payload.child_type,
.fwd = payload.fwd,
},
};
},
.type_fwd_ptr => {
const payload = self.extraData(Tag.ForwardPointerType, data);
return .{
.fwd_ptr_type = .{
.zig_child_type = payload.zig_child_type,
.storage_class = payload.storage_class,
},
};
},
.type_struct_simple => {
const payload = self.extraDataTrail(Tag.SimpleStructType, data);
const member_types = @as([]const Ref, @ptrCast(self.extra.items[payload.trail..][0..payload.data.members_len]));
const member_types: []const Ref = @ptrCast(self.extra.items[payload.trail..][0..payload.data.members_len]);
return .{
.struct_type = .{
.name = payload.data.name,
@@ -891,8 +947,8 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
.type_struct_simple_with_member_names => {
const payload = self.extraDataTrail(Tag.SimpleStructType, data);
const trailing = self.extra.items[payload.trail..];
const member_types = @as([]const Ref, @ptrCast(trailing[0..payload.data.members_len]));
const member_names = @as([]const String, @ptrCast(trailing[payload.data.members_len..][0..payload.data.members_len]));
const member_types: []const Ref = @ptrCast(trailing[0..payload.data.members_len]);
const member_names: []const String = @ptrCast(trailing[payload.data.members_len..][0..payload.data.members_len]);
return .{
.struct_type = .{
.name = payload.data.name,
@@ -903,16 +959,16 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
},
.type_opaque => .{
.opaque_type = .{
.name = @as(String, @enumFromInt(data)),
.name = @enumFromInt(data),
},
},
.float16 => .{ .float = .{
.ty = self.get(.{ .float_type = .{ .bits = 16 } }),
.value = .{ .float16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) },
.value = .{ .float16 = @bitCast(@as(u16, @intCast(data))) },
} },
.float32 => .{ .float = .{
.ty = self.get(.{ .float_type = .{ .bits = 32 } }),
.value = .{ .float32 = @as(f32, @bitCast(data)) },
.value = .{ .float32 = @bitCast(data) },
} },
.float64 => .{ .float = .{
.ty = self.get(.{ .float_type = .{ .bits = 64 } }),
@@ -955,17 +1011,17 @@ pub fn lookup(self: *const Self, ref: Ref) Key {
} };
},
.undef => .{ .undef = .{
.ty = @as(Ref, @enumFromInt(data)),
.ty = @enumFromInt(data),
} },
.null => .{ .null = .{
.ty = @as(Ref, @enumFromInt(data)),
.ty = @enumFromInt(data),
} },
.bool_true => .{ .bool = .{
.ty = @as(Ref, @enumFromInt(data)),
.ty = @enumFromInt(data),
.value = true,
} },
.bool_false => .{ .bool = .{
.ty = @as(Ref, @enumFromInt(data)),
.ty = @enumFromInt(data),
.value = false,
} },
};
@@ -981,7 +1037,7 @@ pub fn resultId(self: Self, ref: Ref) IdResult {
fn get(self: *const Self, key: Key) Ref {
const adapter: Key.Adapter = .{ .self = self };
const index = self.map.getIndexAdapted(key, adapter).?;
return @as(Ref, @enumFromInt(index));
return @enumFromInt(index);
}
fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
@@ -991,15 +1047,16 @@ fn addExtra(self: *Self, spv: *Module, extra: anytype) !u32 {
}
fn addExtraAssumeCapacity(self: *Self, extra: anytype) !u32 {
const payload_offset = @as(u32, @intCast(self.extra.items.len));
const payload_offset: u32 = @intCast(self.extra.items.len);
inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
const field_val = @field(extra, field.name);
const word = switch (field.type) {
const word: u32 = switch (field.type) {
u32 => field_val,
i32 => @as(u32, @bitCast(field_val)),
i32 => @bitCast(field_val),
Ref => @intFromEnum(field_val),
StorageClass => @intFromEnum(field_val),
String => @intFromEnum(field_val),
InternPool.Index => @intFromEnum(field_val),
else => @compileError("Invalid type: " ++ @typeName(field.type)),
};
self.extra.appendAssumeCapacity(word);
@@ -1018,10 +1075,11 @@ fn extraDataTrail(self: Self, comptime T: type, offset: u32) struct { data: T, t
const word = self.extra.items[offset + i];
@field(result, field.name) = switch (field.type) {
u32 => word,
i32 => @as(i32, @bitCast(word)),
Ref => @as(Ref, @enumFromInt(word)),
StorageClass => @as(StorageClass, @enumFromInt(word)),
String => @as(String, @enumFromInt(word)),
i32 => @bitCast(word),
Ref => @enumFromInt(word),
StorageClass => @enumFromInt(word),
String => @enumFromInt(word),
InternPool.Index => @enumFromInt(word),
else => @compileError("Invalid type: " ++ @typeName(field.type)),
};
}
@@ -1049,7 +1107,7 @@ pub const String = enum(u32) {
_ = ctx;
var hasher = std.hash.Wyhash.init(0);
hasher.update(a);
return @as(u32, @truncate(hasher.final()));
return @truncate(hasher.final());
}
};
};
@@ -1064,10 +1122,10 @@ pub fn addString(self: *Self, spv: *Module, str: []const u8) !String {
try self.string_bytes.ensureUnusedCapacity(spv.gpa, 1 + str.len);
self.string_bytes.appendSliceAssumeCapacity(str);
self.string_bytes.appendAssumeCapacity(0);
entry.value_ptr.* = @as(u32, @intCast(offset));
entry.value_ptr.* = @intCast(offset);
}
return @as(String, @enumFromInt(entry.index));
return @enumFromInt(entry.index);
}
pub fn getString(self: *const Self, ref: String) ?[]const u8 {
-11
View File
@@ -507,17 +507,6 @@ pub fn arrayType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
} });
}
pub fn ptrType(
self: *Module,
child: CacheRef,
storage_class: spec.StorageClass,
) !CacheRef {
return try self.resolve(.{ .ptr_type = .{
.storage_class = storage_class,
.child_type = child,
} });
}
pub fn constInt(self: *Module, ty_ref: CacheRef, value: anytype) !IdRef {
const ty = self.cache.lookup(ty_ref).int_type;
const Value = Cache.Key.Int.Value;
-1
View File
@@ -9,7 +9,6 @@ test {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var t: T = .{ .next = null };
try std.testing.expect(t.next == null);
-1
View File
@@ -44,7 +44,6 @@ const a = struct {
test "initialization" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var t = a.init();
try std.testing.expect(t.foo.len == 0);
-4
View File
@@ -12,8 +12,6 @@ const b_list: []B = &[_]B{};
const a = A{ .b_list_pointer = &b_list };
test "segfault bug" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const assert = std.debug.assert;
const obj = B{ .a_pointer = &a };
assert(obj.a_pointer == &a); // this makes zig crash
@@ -30,7 +28,5 @@ pub const B2 = struct {
var b_value = B2{ .pointer_array = &[_]*A2{} };
test "basic stuff" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
std.debug.assert(&b_value == &b_value);
}
-1
View File
@@ -7,7 +7,6 @@ const S = struct {
};
test "bug 2006" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var a: S = undefined;
a = S{ .p = undefined };
-1
View File
@@ -22,7 +22,6 @@ test "fixed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
default_foo = get_foo() catch null; // This Line
try std.testing.expect(!default_foo.?.free);
-1
View File
@@ -8,7 +8,6 @@ test {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var slice: []void = undefined;
destroy(&slice[0]);
-1
View File
@@ -81,7 +81,6 @@ test {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var param: ParamType = .{
.one_of = .{ .name = "name" },
-1
View File
@@ -943,7 +943,6 @@ test "returning an error union containing a type with no runtime bits" {
test "try used in recursive function with inferred error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const Value = union(enum) {
values: []const @This(),
-4
View File
@@ -391,7 +391,6 @@ test "return 0 from function that has u0 return type" {
test "statically initialized struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
st_init_str_foo.x += 1;
try expect(st_init_str_foo.x == 14);
@@ -498,7 +497,6 @@ test "comptime shlWithOverflow" {
test "const ptr to variable data changes at runtime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
try expect(foo_ref.name[0] == 'a');
foo_ref.name = "b";
@@ -1551,8 +1549,6 @@ test "comptime function turns function value to function pointer" {
}
test "container level const and var have unique addresses" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
x: i32,
y: i32,
-1
View File
@@ -205,7 +205,6 @@ fn foo2(arg: anytype) bool {
test "generic struct" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var a1 = GenNode(i32){
.value = 13,
-1
View File
@@ -185,7 +185,6 @@ test "unwrap optional which is field of global var" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
struct_with_optional.field = null;
if (struct_with_optional.field) |payload| {
-1
View File
@@ -193,7 +193,6 @@ test "nested orelse" {
test "self-referential struct through a slice of optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
const Node = struct {
-2
View File
@@ -130,7 +130,6 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
// Test lowering a field ptr
comptime var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
@@ -153,7 +152,6 @@ test "lower reinterpreted comptime field ptr" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
// Test lowering a field ptr
comptime var bytes align(4) = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
-6
View File
@@ -292,7 +292,6 @@ const Val = struct {
test "struct point to self" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var root: Node = undefined;
root.val.x = 1;
@@ -347,7 +346,6 @@ test "self-referencing struct via array member" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const T = struct {
children: [1]*@This(),
@@ -370,7 +368,6 @@ const EmptyStruct = struct {
test "align 1 field before self referential align 8 field as slice return type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const result = alloc(Expr);
try expect(result.len == 0);
@@ -736,7 +733,6 @@ test "packed struct with u0 field access" {
test "access to global struct fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
g_foo.bar.value = 42;
try expect(g_foo.bar.value == 42);
@@ -1423,7 +1419,6 @@ test "fieldParentPtr of a zero-bit field" {
test "struct field has a pointer to an aligned version of itself" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const E = struct {
next: *align(1) @This(),
@@ -1519,7 +1514,6 @@ test "function pointer in struct returns the struct" {
test "no dependency loop on optional field wrapped in generic function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
fn Atomic(comptime T: type) type {
@@ -5,7 +5,6 @@ const builtin = @import("builtin");
test "struct contains null pointer which contains original struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var x: ?*NodeLineComment = null;
try expect(x == null);
@@ -13,7 +13,6 @@ const NodeAligned = struct {
test "struct contains slice of itself" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var other_nodes = [_]Node{
Node{
@@ -54,7 +53,6 @@ test "struct contains slice of itself" {
test "struct contains aligned slice of itself" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
var other_nodes = [_]NodeAligned{
NodeAligned{
-10
View File
@@ -399,7 +399,6 @@ test "tagged union with no payloads" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const a = UnionEnumNoPayloads{ .B = {} };
switch (a) {
@@ -474,7 +473,6 @@ test "update the tag value for zero-sized unions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = union(enum) {
U0: void,
@@ -515,7 +513,6 @@ test "method call on an empty union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
const MyUnion = union(MyUnionTag) {
@@ -593,7 +590,6 @@ test "tagged union with all void fields but a meaningful tag" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
const B = union(enum) {
@@ -795,7 +791,6 @@ test "@unionInit stored to a const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
const U = union(enum) {
@@ -867,7 +862,6 @@ test "union no tag with struct member" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const Struct = struct {};
const Union = union {
@@ -1079,7 +1073,6 @@ test "@unionInit on union with tag but no fields" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const S = struct {
const Type = enum(u8) { no_op = 105 };
@@ -1128,7 +1121,6 @@ test "global variable struct contains union initialized to non-most-aligned fiel
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const T = struct {
const U = union(enum) {
@@ -1348,7 +1340,6 @@ test "union field ptr - zero sized payload" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const U = union {
foo: void,
@@ -1363,7 +1354,6 @@ test "union field ptr - zero sized field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
const U = union {
foo: void,