Merge pull request #21063 from mlugg/incremental

Incremental compilation progress
This commit is contained in:
Matthew Lugg
2024-08-18 12:56:04 +01:00
committed by GitHub
19 changed files with 3171 additions and 966 deletions
+5
View File
@@ -72,6 +72,11 @@ pub fn BoundedArrayAligned(
self.len = @intCast(len);
}
/// Remove all elements from the slice.
/// Only the length is reset to zero; the underlying buffer and its
/// capacity are unaffected, so subsequent appends reuse the storage.
pub fn clear(self: *Self) void {
    self.len = 0;
}
/// Copy the content of an existing slice.
pub fn fromSlice(m: []const T) error{Overflow}!Self {
var list = try init(m.len);
+606 -54
View File
@@ -603,7 +603,7 @@ pub const Inst = struct {
/// Uses the `un_node` field.
typeof,
/// Implements `@TypeOf` for one operand.
/// Uses the `pl_node` field.
/// Uses the `pl_node` field. Payload is `Block`.
typeof_builtin,
/// Given a value, look at the type of it, which must be an integer type.
/// Returns the integer type for the RHS of a shift operation.
@@ -2727,6 +2727,9 @@ pub const Inst = struct {
field_name_start: NullTerminatedString,
};
/// There is a body of instructions at `extra[body_index..][0..body_len]`.
/// Trailing:
/// 0. operand: Ref // for each `operands_len`
pub const TypeOfPeer = struct {
src_node: i32,
body_len: u32,
@@ -2844,6 +2847,40 @@ pub const Inst = struct {
src_line: u32,
};
/// Trailing:
/// 0. multi_cases_len: u32 // if `has_multi_cases`
/// 1. err_capture_inst: u32 // if `any_uses_err_capture`
/// 2. non_err_body {
/// info: ProngInfo,
/// inst: Index // for every `info.body_len`
/// }
/// 3. else_body { // if `has_else`
/// info: ProngInfo,
/// inst: Index // for every `info.body_len`
/// }
/// 4. scalar_cases: { // for every `scalar_cases_len`
/// item: Ref,
/// info: ProngInfo,
/// inst: Index // for every `info.body_len`
/// }
/// 5. multi_cases: { // for every `multi_cases_len`
/// items_len: u32,
/// ranges_len: u32,
/// info: ProngInfo,
/// item: Ref // for every `items_len`
/// ranges: { // for every `ranges_len`
/// item_first: Ref,
/// item_last: Ref,
/// }
/// inst: Index // for every `info.body_len`
/// }
///
/// When analyzing a case body, the switch instruction itself refers to the
/// captured error, or to the success value in `non_err_body`. Whether this
/// is captured by reference or by value depends on whether the `byref` bit
/// is set for the corresponding body. `err_capture_inst` refers to the error
/// capture outside of the `switch`, i.e. `err` in
/// `x catch |err| switch (err) { ... }`.
pub const SwitchBlockErrUnion = struct {
operand: Ref,
bits: Bits,
@@ -3153,7 +3190,7 @@ pub const Inst = struct {
/// 1. captures_len: u32 // if has_captures_len
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u37, // if has_decls_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. capture: Capture // for every captures_len
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. inst: Index // for every body_len
@@ -3624,33 +3661,492 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
}
}
/// Find all type declarations, recursively, within a `declaration` instruction. Does not recurse through
/// said type declarations' declarations; to find all declarations, call this function on the declarations
/// of the discovered types recursively.
/// The iterator would have to allocate memory anyway to iterate, so an `ArrayList` is populated as the result.
pub fn findDecls(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.Index), decl_inst: Zir.Inst.Index) !void {
    // Reuse the caller's buffer; any previous contents are discarded.
    list.clearRetainingCapacity();
    const declaration, const extra_end = zir.getDeclaration(decl_inst);
    const bodies = declaration.getBodies(extra_end, zir);
    // `defer` instructions duplicate the same body arbitrarily many times, but we only want to traverse
    // their contents once per defer. So, we store the extra index of the body here to deduplicate.
    var found_defers: std.AutoHashMapUnmanaged(u32, void) = .{};
    defer found_defers.deinit(gpa);
    try zir.findDeclsBody(gpa, list, &found_defers, bodies.value_body);
    if (bodies.align_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b);
    if (bodies.linksection_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b);
    if (bodies.addrspace_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b);
}
fn findDeclsInner(
zir: Zir,
list: *std.ArrayList(Inst.Index),
gpa: Allocator,
list: *std.ArrayListUnmanaged(Inst.Index),
defers: *std.AutoHashMapUnmanaged(u32, void),
inst: Inst.Index,
) Allocator.Error!void {
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);
switch (tags[@intFromEnum(inst)]) {
.declaration => unreachable,
// Boring instruction tags first. These have no body and are not declarations or type declarations.
.add,
.addwrap,
.add_sat,
.add_unsafe,
.sub,
.subwrap,
.sub_sat,
.mul,
.mulwrap,
.mul_sat,
.div_exact,
.div_floor,
.div_trunc,
.mod,
.rem,
.mod_rem,
.shl,
.shl_exact,
.shl_sat,
.shr,
.shr_exact,
.param_anytype,
.param_anytype_comptime,
.array_cat,
.array_mul,
.array_type,
.array_type_sentinel,
.vector_type,
.elem_type,
.indexable_ptr_elem_type,
.vector_elem_type,
.indexable_ptr_len,
.anyframe_type,
.as_node,
.as_shift_operand,
.bit_and,
.bitcast,
.bit_not,
.bit_or,
.bool_not,
.bool_br_and,
.bool_br_or,
.@"break",
.break_inline,
.check_comptime_control_flow,
.builtin_call,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
.error_set_decl,
.dbg_stmt,
.dbg_var_ptr,
.dbg_var_val,
.decl_ref,
.decl_val,
.load,
.div,
.elem_ptr_node,
.elem_ptr,
.elem_val_node,
.elem_val,
.elem_val_imm,
.ensure_result_used,
.ensure_result_non_error,
.ensure_err_union_payload_void,
.error_union_type,
.error_value,
.@"export",
.export_value,
.field_ptr,
.field_val,
.field_ptr_named,
.field_val_named,
.import,
.int,
.int_big,
.float,
.float128,
.int_type,
.is_non_null,
.is_non_null_ptr,
.is_non_err,
.is_non_err_ptr,
.ret_is_non_err,
.repeat,
.repeat_inline,
.for_len,
.merge_error_sets,
.ref,
.ret_node,
.ret_load,
.ret_implicit,
.ret_err_value,
.ret_err_value_code,
.ret_ptr,
.ret_type,
.ptr_type,
.slice_start,
.slice_end,
.slice_sentinel,
.slice_length,
.store_node,
.store_to_inferred_ptr,
.str,
.negate,
.negate_wrap,
.typeof,
.typeof_log2_int_type,
.@"unreachable",
.xor,
.optional_type,
.optional_payload_safe,
.optional_payload_unsafe,
.optional_payload_safe_ptr,
.optional_payload_unsafe_ptr,
.err_union_payload_unsafe,
.err_union_payload_unsafe_ptr,
.err_union_code,
.err_union_code_ptr,
.enum_literal,
.validate_deref,
.validate_destructure,
.field_type_ref,
.opt_eu_base_ptr_init,
.coerce_ptr_elem_ty,
.validate_ref_ty,
.struct_init_empty,
.struct_init_empty_result,
.struct_init_empty_ref_result,
.struct_init_anon,
.struct_init,
.struct_init_ref,
.validate_struct_init_ty,
.validate_struct_init_result_ty,
.validate_ptr_struct_init,
.struct_init_field_type,
.struct_init_field_ptr,
.array_init_anon,
.array_init,
.array_init_ref,
.validate_array_init_ty,
.validate_array_init_result_ty,
.validate_array_init_ref_ty,
.validate_ptr_array_init,
.array_init_elem_type,
.array_init_elem_ptr,
.union_init,
.type_info,
.size_of,
.bit_size_of,
.int_from_ptr,
.compile_error,
.set_eval_branch_quota,
.int_from_enum,
.align_of,
.int_from_bool,
.embed_file,
.error_name,
.panic,
.trap,
.set_runtime_safety,
.sqrt,
.sin,
.cos,
.tan,
.exp,
.exp2,
.log,
.log2,
.log10,
.abs,
.floor,
.ceil,
.trunc,
.round,
.tag_name,
.type_name,
.frame_type,
.frame_size,
.int_from_float,
.float_from_int,
.ptr_from_int,
.enum_from_int,
.float_cast,
.int_cast,
.ptr_cast,
.truncate,
.has_decl,
.has_field,
.clz,
.ctz,
.pop_count,
.byte_swap,
.bit_reverse,
.bit_offset_of,
.offset_of,
.splat,
.reduce,
.shuffle,
.atomic_load,
.atomic_rmw,
.atomic_store,
.mul_add,
.memcpy,
.memset,
.min,
.max,
.alloc,
.alloc_mut,
.alloc_comptime_mut,
.alloc_inferred,
.alloc_inferred_mut,
.alloc_inferred_comptime,
.alloc_inferred_comptime_mut,
.resolve_inferred_alloc,
.make_ptr_const,
.@"resume",
.@"await",
.save_err_ret_index,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
=> return,
.extended => {
const extended = datas[@intFromEnum(inst)].extended;
switch (extended.opcode) {
.value_placeholder => unreachable,
// Once again, we start with the boring tags.
.variable,
.this,
.ret_addr,
.builtin_src,
.error_return_trace,
.frame,
.frame_address,
.alloc,
.builtin_extern,
.@"asm",
.asm_expr,
.compile_log,
.min_multi,
.max_multi,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.c_undef,
.c_include,
.c_define,
.wasm_memory_size,
.wasm_memory_grow,
.prefetch,
.fence,
.set_float_mode,
.set_align_stack,
.set_cold,
.error_cast,
.await_nosuspend,
.breakpoint,
.disable_instrumentation,
.select,
.int_from_error,
.error_from_int,
.builtin_async_call,
.cmpxchg,
.c_va_arg,
.c_va_copy,
.c_va_end,
.c_va_start,
.ptr_cast_full,
.ptr_cast_no_dest,
.work_item_id,
.work_group_size,
.work_group_id,
.in_comptime,
.restore_err_ret_index,
.closure_get,
.field_parent_ptr,
=> return,
// `@TypeOf` has a body.
.typeof_peer => {
const extra = zir.extraData(Zir.Inst.TypeOfPeer, extended.operand);
const body = zir.bodySlice(extra.data.body_index, extra.data.body_len);
try zir.findDeclsBody(gpa, list, defers, body);
},
// Reifications and opaque declarations need tracking, but have no body.
.reify, .opaque_decl => return list.append(gpa, inst),
// Struct declarations need tracking and have bodies.
.struct_decl => {
try list.append(gpa, inst);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
var extra_index = extra.end;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1;
if (backing_int_body_len == 0) {
extra_index += 1; // backing_int_ref
} else {
const body = zir.bodySlice(extra_index, backing_int_body_len);
extra_index += backing_int_body_len;
try zir.findDeclsBody(gpa, list, defers, body);
}
}
extra_index += decls_len;
// This ZIR is structured in a slightly awkward way, so we have to split up the iteration.
// `extra_index` iterates `flags` (bags of bits).
// `fields_extra_index` iterates `fields`.
// We accumulate the total length of bodies into `total_bodies_len`. This is sufficient because
// the bodies are packed together in `extra` and we only need to traverse their instructions (we
// don't really care about the structure).
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
var cur_bit_bag: u32 = undefined;
var fields_extra_index = extra_index + bit_bags_count;
var total_bodies_len: u32 = 0;
for (0..fields_len) |field_i| {
if (field_i % fields_per_u32 == 0) {
cur_bit_bag = zir.extra[extra_index];
extra_index += 1;
}
const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
cur_bit_bag >>= 1;
const has_init = @as(u1, @truncate(cur_bit_bag)) != 0;
cur_bit_bag >>= 2; // also skip `is_comptime`; we don't care
const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
cur_bit_bag >>= 1;
fields_extra_index += @intFromBool(!small.is_tuple); // field_name
fields_extra_index += 1; // doc_comment
if (has_type_body) {
const field_type_body_len = zir.extra[fields_extra_index];
total_bodies_len += field_type_body_len;
}
fields_extra_index += 1; // field_type or field_type_body_len
if (has_align) {
const align_body_len = zir.extra[fields_extra_index];
fields_extra_index += 1;
total_bodies_len += align_body_len;
}
if (has_init) {
const init_body_len = zir.extra[fields_extra_index];
fields_extra_index += 1;
total_bodies_len += init_body_len;
}
}
// Now, `fields_extra_index` points to `bodies`. Let's treat this as one big body.
const merged_bodies = zir.bodySlice(fields_extra_index, total_bodies_len);
try zir.findDeclsBody(gpa, list, defers, merged_bodies);
},
// Union declarations need tracking and have a body.
.union_decl => {
try list.append(gpa, inst);
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
var extra_index = extra.end;
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const body_len = if (small.has_body_len) blk: {
const body_len = zir.extra[extra_index];
extra_index += 1;
break :blk body_len;
} else 0;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += decls_len;
const body = zir.bodySlice(extra_index, body_len);
try zir.findDeclsBody(gpa, list, defers, body);
},
// Enum declarations need tracking and have a body.
.enum_decl => {
try list.append(gpa, inst);
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
var extra_index = extra.end;
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const body_len = if (small.has_body_len) blk: {
const body_len = zir.extra[extra_index];
extra_index += 1;
break :blk body_len;
} else 0;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
extra_index += decls_len;
const body = zir.bodySlice(extra_index, body_len);
try zir.findDeclsBody(gpa, list, defers, body);
},
}
},
// Functions instructions are interesting and have a body.
.func,
.func_inferred,
=> {
try list.append(inst);
try list.append(gpa, inst);
const inst_data = datas[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(Inst.Func, inst_data.payload_index);
@@ -3661,14 +4157,14 @@ fn findDeclsInner(
else => {
const body = zir.bodySlice(extra_index, extra.data.ret_body_len);
extra_index += body.len;
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
},
}
const body = zir.bodySlice(extra_index, extra.data.body_len);
return zir.findDeclsBody(list, body);
return zir.findDeclsBody(gpa, list, defers, body);
},
.func_fancy => {
try list.append(inst);
try list.append(gpa, inst);
const inst_data = datas[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
@@ -3679,7 +4175,7 @@ fn findDeclsInner(
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_align_ref) {
extra_index += 1;
@@ -3689,7 +4185,7 @@ fn findDeclsInner(
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_addrspace_ref) {
extra_index += 1;
@@ -3699,7 +4195,7 @@ fn findDeclsInner(
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_section_ref) {
extra_index += 1;
@@ -3709,7 +4205,7 @@ fn findDeclsInner(
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_cc_ref) {
extra_index += 1;
@@ -3719,7 +4215,7 @@ fn findDeclsInner(
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.bodySlice(extra_index, body_len);
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
extra_index += body.len;
} else if (extra.data.bits.has_ret_ty_ref) {
extra_index += 1;
@@ -3728,62 +4224,99 @@ fn findDeclsInner(
extra_index += @intFromBool(extra.data.bits.has_any_noalias);
const body = zir.bodySlice(extra_index, extra.data.body_len);
return zir.findDeclsBody(list, body);
},
.extended => {
const extended = datas[@intFromEnum(inst)].extended;
switch (extended.opcode) {
// Decl instructions are interesting but have no body.
// TODO yes they do have a body actually. recurse over them just like block instructions.
.struct_decl,
.union_decl,
.enum_decl,
.opaque_decl,
.reify,
=> return list.append(inst),
else => return,
}
return zir.findDeclsBody(gpa, list, defers, body);
},
// Block instructions, recurse over the bodies.
.block, .block_comptime, .block_inline => {
.block,
.block_comptime,
.block_inline,
.c_import,
.typeof_builtin,
.loop,
=> {
const inst_data = datas[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(Inst.Block, inst_data.payload_index);
const body = zir.bodySlice(extra.end, extra.data.body_len);
return zir.findDeclsBody(list, body);
return zir.findDeclsBody(gpa, list, defers, body);
},
.condbr, .condbr_inline => {
const inst_data = datas[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(Inst.CondBr, inst_data.payload_index);
const then_body = zir.bodySlice(extra.end, extra.data.then_body_len);
const else_body = zir.bodySlice(extra.end + then_body.len, extra.data.else_body_len);
try zir.findDeclsBody(list, then_body);
try zir.findDeclsBody(list, else_body);
try zir.findDeclsBody(gpa, list, defers, then_body);
try zir.findDeclsBody(gpa, list, defers, else_body);
},
.@"try", .try_ptr => {
const inst_data = datas[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(Inst.Try, inst_data.payload_index);
const body = zir.bodySlice(extra.end, extra.data.body_len);
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
},
.switch_block => return findDeclsSwitch(zir, list, inst),
.switch_block, .switch_block_ref => return zir.findDeclsSwitch(gpa, list, defers, inst, .normal),
.switch_block_err_union => return zir.findDeclsSwitch(gpa, list, defers, inst, .err_union),
.suspend_block => @panic("TODO iterate suspend block"),
else => return, // Regular instruction, not interesting.
.param, .param_comptime => {
const inst_data = datas[@intFromEnum(inst)].pl_tok;
const extra = zir.extraData(Inst.Param, inst_data.payload_index);
const body = zir.bodySlice(extra.end, extra.data.body_len);
try zir.findDeclsBody(gpa, list, defers, body);
},
inline .call, .field_call => |tag| {
const inst_data = datas[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(switch (tag) {
.call => Inst.Call,
.field_call => Inst.FieldCall,
else => unreachable,
}, inst_data.payload_index);
// It's easiest to just combine all the arg bodies into one body, like we do above for `struct_decl`.
const args_len = extra.data.flags.args_len;
if (args_len > 0) {
const first_arg_start_off = args_len;
const final_arg_end_off = zir.extra[extra.end + args_len - 1];
const args_body = zir.bodySlice(extra.end + first_arg_start_off, final_arg_end_off - first_arg_start_off);
try zir.findDeclsBody(gpa, list, defers, args_body);
}
},
.@"defer" => {
const inst_data = datas[@intFromEnum(inst)].@"defer";
const gop = try defers.getOrPut(gpa, inst_data.index);
if (!gop.found_existing) {
const body = zir.bodySlice(inst_data.index, inst_data.len);
try zir.findDeclsBody(gpa, list, defers, body);
}
},
.defer_err_code => {
const inst_data = datas[@intFromEnum(inst)].defer_err_code;
const extra = zir.extraData(Inst.DeferErrCode, inst_data.payload_index).data;
const gop = try defers.getOrPut(gpa, extra.index);
if (!gop.found_existing) {
const body = zir.bodySlice(extra.index, extra.len);
try zir.findDeclsBody(gpa, list, defers, body);
}
},
}
}
fn findDeclsSwitch(
zir: Zir,
list: *std.ArrayList(Inst.Index),
gpa: Allocator,
list: *std.ArrayListUnmanaged(Inst.Index),
defers: *std.AutoHashMapUnmanaged(u32, void),
inst: Inst.Index,
/// Distinguishes between `switch_block[_ref]` and `switch_block_err_union`.
comptime kind: enum { normal, err_union },
) Allocator.Error!void {
const inst_data = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index);
const extra = zir.extraData(switch (kind) {
.normal => Inst.SwitchBlock,
.err_union => Inst.SwitchBlockErrUnion,
}, inst_data.payload_index);
var extra_index: usize = extra.end;
@@ -3793,18 +4326,35 @@ fn findDeclsSwitch(
break :blk multi_cases_len;
} else 0;
if (extra.data.bits.any_has_tag_capture) {
if (switch (kind) {
.normal => extra.data.bits.any_has_tag_capture,
.err_union => extra.data.bits.any_uses_err_capture,
}) {
extra_index += 1;
}
const special_prong = extra.data.bits.specialProng();
if (special_prong != .none) {
const has_special = switch (kind) {
.normal => extra.data.bits.specialProng() != .none,
.err_union => has_special: {
// Handle `non_err_body` first.
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
extra_index += 1;
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findDeclsBody(gpa, list, defers, body);
break :has_special extra.data.bits.has_else;
},
};
if (has_special) {
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
extra_index += 1;
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
}
{
@@ -3816,7 +4366,7 @@ fn findDeclsSwitch(
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
}
}
{
@@ -3833,18 +4383,20 @@ fn findDeclsSwitch(
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findDeclsBody(list, body);
try zir.findDeclsBody(gpa, list, defers, body);
}
}
}
/// Traverse every instruction in `body`, appending discovered type
/// declarations to `list` via `findDeclsInner`.
/// `defers` deduplicates `defer` bodies so each one is traversed only once.
fn findDeclsBody(
    zir: Zir,
    gpa: Allocator,
    list: *std.ArrayListUnmanaged(Inst.Index),
    defers: *std.AutoHashMapUnmanaged(u32, void),
    body: []const Inst.Index,
) Allocator.Error!void {
    for (body) |member| {
        try zir.findDeclsInner(gpa, list, defers, member);
    }
}
@@ -4042,7 +4594,7 @@ pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash {
return null;
}
const extra_index = extra.end +
1 +
extra.data.ret_body_len +
extra.data.body_len +
@typeInfo(Inst.Func.SrcLocs).Struct.fields.len;
return @bitCast([4]u32{
+74 -104
View File
@@ -2264,13 +2264,19 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
}
zcu.analysis_roots.clear();
try comp.queueJob(.{ .analyze_mod = std_mod });
if (comp.config.is_test) {
zcu.analysis_roots.appendAssumeCapacity(std_mod);
if (comp.config.is_test and zcu.main_mod != std_mod) {
try comp.queueJob(.{ .analyze_mod = zcu.main_mod });
zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod);
}
if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
try comp.queueJob(.{ .analyze_mod = compiler_rt_mod });
zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod);
}
}
@@ -2294,7 +2300,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
zcu.intern_pool.dumpGenericInstances(gpa);
}
if (comp.config.is_test and comp.totalErrorCount() == 0) {
if (comp.config.is_test) {
// The `test_functions` decl has been intentionally postponed until now,
// at which point we must populate it with the list of test functions that
// have been discovered and not filtered out.
@@ -2304,7 +2310,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
try pt.processExports();
}
if (comp.totalErrorCount() != 0) {
if (try comp.totalErrorCount() != 0) {
// Skip flushing and keep source files loaded for error reporting.
comp.link_error_flags = .{};
return;
@@ -2388,7 +2394,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
try flush(comp, arena, .main, main_progress_node);
if (comp.totalErrorCount() != 0) return;
if (try comp.totalErrorCount() != 0) return;
// Failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
@@ -2405,7 +2412,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
},
.incremental => {
try flush(comp, arena, .main, main_progress_node);
if (comp.totalErrorCount() != 0) return;
},
}
}
@@ -3041,82 +3047,6 @@ fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void {
list.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len });
}
/// This function is temporally single-threaded.
/// Computes the total number of error messages that would be reported for this
/// compilation by summing counts from each failure category, without building
/// an error bundle.
pub fn totalErrorCount(comp: *Compilation) u32 {
    // Compilation-level failures that each produce exactly one message.
    var total: usize =
        comp.misc_failures.count() +
        @intFromBool(comp.alloc_failure_occurred) +
        comp.lld_errors.items.len;
    // One message per diagnostic of each failed C object.
    for (comp.failed_c_objects.values()) |bundle| {
        total += bundle.diags.len;
    }
    for (comp.failed_win32_resources.values()) |errs| {
        total += errs.errorMessageCount();
    }
    if (comp.module) |zcu| {
        const ip = &zcu.intern_pool;
        total += zcu.failed_exports.count();
        total += zcu.failed_embed_files.count();
        for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
            if (error_msg) |_| {
                total += 1;
            } else {
                // No stored message: count the AstGen compile errors recorded
                // in the file's ZIR instead.
                assert(file.zir_loaded);
                const payload_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)];
                assert(payload_index != 0);
                const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
                total += header.data.items_len;
            }
        }
        // Skip errors for Decls within files that failed parsing.
        // When a parse error is introduced, we keep all the semantic analysis for
        // the previous parse success, including compile errors, but we cannot
        // emit them until the file succeeds parsing.
        for (zcu.failed_analysis.keys()) |anal_unit| {
            const file_index = switch (anal_unit.unwrap()) {
                .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope,
                .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file,
            };
            if (zcu.fileByIndex(file_index).okToReportErrors()) {
                total += 1;
                if (zcu.cimport_errors.get(anal_unit)) |errors| {
                    total += errors.errorMessageCount();
                }
            }
        }
        // Exceeding the global error-set name limit reports one extra error.
        if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) {
            total += 1;
        }
        for (zcu.failed_codegen.keys()) |_| {
            total += 1;
        }
    }
    // The "no entry point found" error only counts if there are no semantic analysis errors.
    if (total == 0) {
        total += @intFromBool(comp.link_error_flags.no_entry_point_found);
    }
    total += @intFromBool(comp.link_error_flags.missing_libc);
    total += comp.link_errors.items.len;
    // Compile log errors only count if there are no other errors.
    if (total == 0) {
        if (comp.module) |zcu| {
            total += @intFromBool(zcu.compile_log_sources.count() != 0);
        }
    }
    return @as(u32, @intCast(total));
}
/// This function is temporally single-threaded.
pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
const gpa = comp.gpa;
@@ -3159,12 +3089,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
.msg = try bundle.addString("memory allocation failure"),
});
}
var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null;
defer if (all_references) |*a| a.deinit(gpa);
if (comp.module) |zcu| {
const ip = &zcu.intern_pool;
var all_references = try zcu.resolveReferences();
defer all_references.deinit(gpa);
for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
if (error_msg) |msg| {
try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references);
@@ -3190,8 +3121,14 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
if (ctx.err.*) |_| return lhs_index < rhs_index;
const errors = ctx.zcu.failed_analysis.values();
const lhs_src_loc = errors[lhs_index].src_loc.upgrade(ctx.zcu);
const rhs_src_loc = errors[rhs_index].src_loc.upgrade(ctx.zcu);
const lhs_src_loc = errors[lhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
// LHS source location lost, so should never be referenced. Just sort it to the end.
return false;
};
const rhs_src_loc = errors[rhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
// RHS source location lost, so should never be referenced. Just sort it to the end.
return true;
};
return if (lhs_src_loc.file_scope != rhs_src_loc.file_scope) std.mem.order(
u8,
lhs_src_loc.file_scope.sub_file_path,
@@ -3212,9 +3149,16 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
if (err) |e| return e;
}
for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
if (comp.incremental) {
if (all_references == null) {
all_references = try zcu.resolveReferences();
}
if (!all_references.?.contains(anal_unit)) continue;
}
const file_index = switch (anal_unit.unwrap()) {
.cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope,
.func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file,
.func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file,
};
// Skip errors for AnalUnits within files that had a parse failure.
@@ -3243,7 +3187,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
}
}
for (zcu.failed_codegen.values()) |error_msg| {
for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| {
if (!zcu.navFileScope(nav).okToReportErrors()) continue;
try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
}
for (zcu.failed_exports.values()) |value| {
@@ -3304,9 +3249,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
if (comp.module) |zcu| {
if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) {
var all_references = try zcu.resolveReferences();
defer all_references.deinit(gpa);
const values = zcu.compile_log_sources.values();
// First one will be the error; subsequent ones will be notes.
const src_loc = values[0].src();
@@ -3328,12 +3270,30 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
}
assert(comp.totalErrorCount() == bundle.root_list.items.len);
if (comp.module) |zcu| {
if (comp.incremental and bundle.root_list.items.len == 0) {
const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| {
if (all_references == null) {
all_references = try zcu.resolveReferences();
}
if (all_references.?.contains(failed_unit)) break true;
} else false;
if (should_have_error) {
@panic("referenced transitive analysis errors, but none actually emitted");
}
}
}
const compile_log_text = if (comp.module) |m| m.compile_log_text.items else "";
return bundle.toOwnedBundle(compile_log_text);
}
/// Returns the number of error messages for this compilation, computed by
/// materializing the full error bundle (`getAllErrorsAlloc`) and asking it
/// for its message count.
fn totalErrorCount(comp: *Compilation) !u32 {
    var error_bundle = try comp.getAllErrorsAlloc();
    defer error_bundle.deinit(comp.gpa);
    return error_bundle.errorMessageCount();
}
pub const ErrorNoteHashContext = struct {
eb: *const ErrorBundle.Wip,
@@ -3384,7 +3344,7 @@ pub fn addModuleErrorMsg(
mod: *Zcu,
eb: *ErrorBundle.Wip,
module_err_msg: Zcu.ErrorMsg,
all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference),
all_references: *?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference),
) !void {
const gpa = eb.gpa;
const ip = &mod.intern_pool;
@@ -3408,13 +3368,18 @@ pub fn addModuleErrorMsg(
defer ref_traces.deinit(gpa);
if (module_err_msg.reference_trace_root.unwrap()) |rt_root| {
if (all_references.* == null) {
all_references.* = try mod.resolveReferences();
}
var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{};
defer seen.deinit(gpa);
const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len;
var referenced_by = rt_root;
while (all_references.get(referenced_by)) |ref| {
while (all_references.*.?.get(referenced_by)) |maybe_ref| {
const ref = maybe_ref orelse break;
const gop = try seen.getOrPut(gpa, ref.referencer);
if (gop.found_existing) break;
if (ref_traces.items.len < max_references) {
@@ -3423,6 +3388,7 @@ pub fn addModuleErrorMsg(
const span = try src.span(gpa);
const loc = std.zig.findLineColumn(source.bytes, span.main);
const rt_file_path = try src.file_scope.fullPath(gpa);
defer gpa.free(rt_file_path);
const name = switch (ref.referencer.unwrap()) {
.cau => |cau| switch (ip.getCau(cau).owner.unwrap()) {
.nav => |nav| ip.getNav(nav).name.toSlice(ip),
@@ -3537,6 +3503,8 @@ pub fn performAllTheWork(
mod.sema_prog_node = std.Progress.Node.none;
mod.codegen_prog_node.end();
mod.codegen_prog_node = std.Progress.Node.none;
mod.generation += 1;
};
try comp.performAllTheWorkInner(main_progress_node);
if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error;
@@ -3608,10 +3576,9 @@ fn performAllTheWorkInner(
// Pre-load these things from our single-threaded context since they
// will be needed by the worker threads.
const path_digest = zcu.filePathDigest(file_index);
const old_root_type = zcu.fileRootType(file_index);
const file = zcu.fileByIndex(file_index);
comp.thread_pool.spawnWgId(&astgen_wait_group, workerAstGenFile, .{
comp, file, file_index, path_digest, old_root_type, zir_prog_node, &astgen_wait_group, .root,
comp, file, file_index, path_digest, zir_prog_node, &astgen_wait_group, .root,
});
}
}
@@ -3649,11 +3616,15 @@ fn performAllTheWorkInner(
}
try reportMultiModuleErrors(pt);
try zcu.flushRetryableFailures();
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
}
if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&work_queue_wait_group, codegenThread, .{comp});
if (!InternPool.single_threaded) {
comp.codegen_work.done = false; // may be `true` from a prior update
comp.thread_pool.spawnWgId(&work_queue_wait_group, codegenThread, .{comp});
}
defer if (!InternPool.single_threaded) {
{
comp.codegen_work.mutex.lock();
@@ -4283,7 +4254,6 @@ fn workerAstGenFile(
file: *Zcu.File,
file_index: Zcu.File.Index,
path_digest: Cache.BinDigest,
old_root_type: InternPool.Index,
prog_node: std.Progress.Node,
wg: *WaitGroup,
src: Zcu.AstGenSrc,
@@ -4292,7 +4262,7 @@ fn workerAstGenFile(
defer child_prog_node.end();
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
pt.astGenFile(file, path_digest, old_root_type) catch |err| switch (err) {
pt.astGenFile(file, path_digest) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
file.status = .retryable_failure;
@@ -4323,7 +4293,7 @@ fn workerAstGenFile(
// `@import("builtin")` is handled specially.
if (mem.eql(u8, import_path, "builtin")) continue;
const import_result, const imported_path_digest, const imported_root_type = blk: {
const import_result, const imported_path_digest = blk: {
comp.mutex.lock();
defer comp.mutex.unlock();
@@ -4338,8 +4308,7 @@ fn workerAstGenFile(
comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue;
};
const imported_path_digest = pt.zcu.filePathDigest(res.file_index);
const imported_root_type = pt.zcu.fileRootType(res.file_index);
break :blk .{ res, imported_path_digest, imported_root_type };
break :blk .{ res, imported_path_digest };
};
if (import_result.is_new) {
log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{
@@ -4350,7 +4319,7 @@ fn workerAstGenFile(
.import_tok = item.data.token,
} };
comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{
comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_type, prog_node, wg, sub_src,
comp, import_result.file, import_result.file_index, imported_path_digest, prog_node, wg, sub_src,
});
}
}
@@ -6443,7 +6412,8 @@ fn buildOutputFromZig(
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
assert(out.* == null);
// Under incremental compilation, `out` may already be populated from a prior update.
assert(out.* == null or comp.incremental);
out.* = try sub_compilation.toCrtFile();
}
+307 -48
View File
@@ -62,22 +62,60 @@ const want_multi_threaded = true;
/// Whether a single-threaded intern pool impl is in use.
pub const single_threaded = builtin.single_threaded or !want_multi_threaded;
/// A `TrackedInst.Index` provides a single, unchanging reference to a ZIR instruction across a whole
/// compilation. From this index, you can acquire a `TrackedInst`, which contains a reference to both
/// the file which the instruction lives in, and the instruction index itself, which is updated on
/// incremental updates by `Zcu.updateZirRefs`.
pub const TrackedInst = extern struct {
file: FileIndex,
inst: Zir.Inst.Index,
comptime {
// The fields should be tightly packed. See also serialization logic in `Compilation.saveState`.
assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(Zir.Inst.Index));
}
/// It is possible on an incremental update that we "lose" a ZIR instruction: some tracked `%x` in
/// the old ZIR failed to map to any `%y` in the new ZIR. For this reason, we actually store values
/// of type `MaybeLost`, which uses `ZirIndex.lost` to represent this case. `Index.resolve` etc
/// return `null` when the `TrackedInst` being resolved has been lost.
pub const MaybeLost = extern struct {
file: FileIndex,
inst: ZirIndex,
pub const ZirIndex = enum(u32) {
/// Tracking failed for this ZIR instruction. Uses of it should fail.
lost = std.math.maxInt(u32),
_,
pub fn unwrap(inst: ZirIndex) ?Zir.Inst.Index {
return switch (inst) {
.lost => null,
_ => @enumFromInt(@intFromEnum(inst)),
};
}
pub fn wrap(inst: Zir.Inst.Index) ZirIndex {
return @enumFromInt(@intFromEnum(inst));
}
};
comptime {
// The fields should be tightly packed. See also serialization logic in `Compilation.saveState`.
assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(ZirIndex));
}
};
pub const Index = enum(u32) {
_,
pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) TrackedInst {
pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) ?TrackedInst {
const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
return tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
return .{
.file = maybe_lost.file,
.inst = maybe_lost.inst.unwrap() orelse return null,
};
}
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index {
return i.resolveFull(ip).inst;
pub fn resolveFile(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) FileIndex {
const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
return maybe_lost.file;
}
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) ?Zir.Inst.Index {
return (i.resolveFull(ip) orelse return null).inst;
}
pub fn toOptional(i: TrackedInst.Index) Optional {
@@ -120,7 +158,11 @@ pub fn trackZir(
tid: Zcu.PerThread.Id,
key: TrackedInst,
) Allocator.Error!TrackedInst.Index {
const full_hash = Hash.hash(0, std.mem.asBytes(&key));
const maybe_lost_key: TrackedInst.MaybeLost = .{
.file = key.file,
.inst = TrackedInst.MaybeLost.ZirIndex.wrap(key.inst),
};
const full_hash = Hash.hash(0, std.mem.asBytes(&maybe_lost_key));
const hash: u32 = @truncate(full_hash >> 32);
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
var map = shard.shared.tracked_inst_map.acquire();
@@ -132,12 +174,11 @@ pub fn trackZir(
const entry = &map.entries[map_index];
const index = entry.acquire().unwrap() orelse break;
if (entry.hash != hash) continue;
if (std.meta.eql(index.resolveFull(ip), key)) return index;
if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
}
shard.mutate.tracked_inst_map.mutex.lock();
defer shard.mutate.tracked_inst_map.mutex.unlock();
if (map.entries != shard.shared.tracked_inst_map.entries) {
shard.mutate.tracked_inst_map.len += 1;
map = shard.shared.tracked_inst_map;
map_mask = map.header().mask();
map_index = hash;
@@ -147,7 +188,7 @@ pub fn trackZir(
const entry = &map.entries[map_index];
const index = entry.acquire().unwrap() orelse break;
if (entry.hash != hash) continue;
if (std.meta.eql(index.resolveFull(ip), key)) return index;
if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
}
defer shard.mutate.tracked_inst_map.len += 1;
const local = ip.getLocal(tid);
@@ -161,7 +202,7 @@ pub fn trackZir(
.tid = tid,
.index = list.mutate.len,
}).wrap(ip);
list.appendAssumeCapacity(.{key});
list.appendAssumeCapacity(.{maybe_lost_key});
entry.release(index.toOptional());
return index;
}
@@ -205,12 +246,94 @@ pub fn trackZir(
.tid = tid,
.index = list.mutate.len,
}).wrap(ip);
list.appendAssumeCapacity(.{key});
list.appendAssumeCapacity(.{maybe_lost_key});
map.entries[map_index] = .{ .value = index.toOptional(), .hash = hash };
shard.shared.tracked_inst_map.release(new_map);
return index;
}
/// At the start of an incremental update, we update every entry in `tracked_insts` to include
/// the new ZIR index. Once this is done, we must update the hashmap metadata so that lookups
/// return correct entries where they already exist.
pub fn rehashTrackedInsts(
ip: *InternPool,
gpa: Allocator,
tid: Zcu.PerThread.Id,
) Allocator.Error!void {
assert(tid == .main); // we shouldn't have any other threads active right now
// TODO: this function doesn't handle OOM well. What should it do?
// We don't lock anything, as this function assumes that no other thread is
// accessing `tracked_insts`. This is necessary because we're going to be
// iterating the `TrackedInst`s in each `Local`, so we have to know that
// none will be added as we work.
// Figure out how big each shard needs to be and store it in its mutate `len`.
for (ip.shards) |*shard| shard.mutate.tracked_inst_map.len = 0;
for (ip.locals) |*local| {
// `getMutableTrackedInsts` is okay only because no other thread is currently active.
// We need the `mutate` for the len.
for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0")) |tracked_inst| {
if (tracked_inst.inst == .lost) continue; // we can ignore this one!
const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
shard.mutate.tracked_inst_map.len += 1;
}
}
const Map = Shard.Map(TrackedInst.Index.Optional);
const arena_state = &ip.getLocal(tid).mutate.arena;
// We know how big each shard must be, so ensure we have the capacity we need.
for (ip.shards) |*shard| {
const want_capacity = std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3) catch unreachable;
const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex
if (have_capacity >= want_capacity) {
@memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
continue;
}
var arena = arena_state.promote(gpa);
defer arena_state.* = arena.state;
const new_map_buf = try arena.allocator().alignedAlloc(
u8,
Map.alignment,
Map.entries_offset + want_capacity * @sizeOf(Map.Entry),
);
const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
new_map.header().* = .{ .capacity = want_capacity };
@memset(new_map.entries[0..want_capacity], .{ .value = .none, .hash = undefined });
shard.shared.tracked_inst_map.release(new_map);
}
// Now, actually insert the items.
for (ip.locals, 0..) |*local, local_tid| {
// `getMutableTrackedInsts` is okay only because no other thread is currently active.
// We need the `mutate` for the len.
for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| {
if (tracked_inst.inst == .lost) continue; // we can ignore this one!
const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
const hash: u32 = @truncate(full_hash >> 32);
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
const map = shard.shared.tracked_inst_map; // no acquire because we hold the mutex
const map_mask = map.header().mask();
var map_index = hash;
const entry = while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
if (entry.acquire() == .none) break entry;
};
const index = TrackedInst.Index.Unwrapped.wrap(.{
.tid = @enumFromInt(local_tid),
.index = @intCast(local_inst_index),
}, ip);
entry.hash = hash;
entry.release(index.toOptional());
}
}
}
/// Analysis Unit. Represents a single entity which undergoes semantic analysis.
/// This is either a `Cau` or a runtime function.
/// The LSB is used as a tag bit.
@@ -572,10 +695,6 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI
.ip = ip,
.next_entry = .none,
};
if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{
.ip = ip,
.next_entry = .none,
};
return .{
.ip = ip,
.next_entry = first_entry.toOptional(),
@@ -612,7 +731,6 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend
if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) {
// Dummy entry, so we can reuse it rather than allocating a new one!
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none;
break :new_index gop.value_ptr.*;
}
@@ -620,7 +738,12 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend
const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: {
break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] };
} else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() };
ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none;
if (gop.found_existing) {
ptr.next = gop.value_ptr.*.toOptional();
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].prev = new_index.toOptional();
} else {
ptr.next = .none;
}
gop.value_ptr.* = new_index;
break :new_index new_index;
},
@@ -642,10 +765,9 @@ pub const NamespaceNameKey = struct {
};
pub const DepEntry = extern struct {
/// If null, this is a dummy entry - all other fields are `undefined`. It is
/// the first and only entry in one of `intern_pool.*_deps`, and does not
/// appear in any list by `first_dependency`, but is not in
/// `free_dep_entries` since `*_deps` stores a reference to it.
/// If null, this is a dummy entry. `next_dependee` is undefined. This is the first
/// entry in one of `*_deps`, and does not appear in any list by `first_dependency`,
/// but is not in `free_dep_entries` since `*_deps` stores a reference to it.
depender: AnalUnit.Optional,
/// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee.
/// Used to iterate all dependers for a given dependee during an update.
@@ -684,6 +806,14 @@ const Local = struct {
/// This state is fully local to the owning thread and does not require any
/// atomic access.
mutate: struct {
/// When we need to allocate any long-lived buffer for mutating the `InternPool`, it is
/// allocated into this `arena` (for the `Id` of the thread performing the mutation). An
/// arena is used to avoid contention on the GPA, and to ensure that any code which retains
/// references to old state remains valid. For instance, when reallocing hashmap metadata,
/// a racing lookup on another thread may still retain a handle to the old metadata pointer,
/// so it must remain valid.
/// This arena's lifetime is tied to that of `Compilation`, although it can be cleared on
/// garbage collection (currently vaporware).
arena: std.heap.ArenaAllocator.State,
items: ListMutate,
@@ -728,7 +858,7 @@ const Local = struct {
else => @compileError("unsupported host"),
};
const Strings = List(struct { u8 });
const TrackedInsts = List(struct { TrackedInst });
const TrackedInsts = List(struct { TrackedInst.MaybeLost });
const Maps = List(struct { FieldMap });
const Caus = List(struct { Cau });
const Navs = List(Nav.Repr);
@@ -959,6 +1089,14 @@ const Local = struct {
mutable.list.release(new_list);
}
pub fn viewAllowEmpty(mutable: Mutable) View {
const capacity = mutable.list.header().capacity;
return .{
.bytes = mutable.list.bytes,
.len = mutable.mutate.len,
.capacity = capacity,
};
}
pub fn view(mutable: Mutable) View {
const capacity = mutable.list.header().capacity;
assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
@@ -996,7 +1134,6 @@ const Local = struct {
fn header(list: ListSelf) *Header {
return @ptrFromInt(@intFromPtr(list.bytes) - bytes_offset);
}
pub fn view(list: ListSelf) View {
const capacity = list.header().capacity;
assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
@@ -2570,7 +2707,12 @@ pub const Key = union(enum) {
.variable => |a_info| {
const b_info = b.variable;
return a_info.owner_nav == b_info.owner_nav;
return a_info.owner_nav == b_info.owner_nav and
a_info.ty == b_info.ty and
a_info.init == b_info.init and
a_info.lib_name == b_info.lib_name and
a_info.is_threadlocal == b_info.is_threadlocal and
a_info.is_weak_linkage == b_info.is_weak_linkage;
},
.@"extern" => |a_info| {
const b_info = b.@"extern";
@@ -6958,6 +7100,7 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
const index = entry.acquire();
if (index == .none) break;
if (entry.hash != hash) continue;
if (ip.isRemoved(index)) continue;
if (ip.indexToKey(index).eql(key, ip)) return .{ .existing = index };
}
shard.mutate.map.mutex.lock();
@@ -7032,6 +7175,43 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
.map_index = map_index,
} };
}
/// Like `getOrPutKey`, but asserts that the key already exists, and prepares to replace
/// its shard entry with a new `Index` anyway. After finalizing this, the old index remains
/// valid (in that `indexToKey` and similar queries will behave as before), but it will
/// never be returned from a lookup (`getOrPutKey` etc).
/// This is used by incremental compilation when an existing container type is outdated. In
/// this case, the type must be recreated at a new `InternPool.Index`, but the old index must
/// remain valid since now-unreferenced `AnalUnit`s may retain references to it. The old index
/// will be cleaned up when the `Zcu` undergoes garbage collection.
fn putKeyReplace(
ip: *InternPool,
tid: Zcu.PerThread.Id,
key: Key,
) GetOrPutKey {
const full_hash = key.hash64(ip);
const hash: u32 = @truncate(full_hash >> 32);
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
shard.mutate.map.mutex.lock();
errdefer shard.mutate.map.mutex.unlock();
const map = shard.shared.map;
const map_mask = map.header().mask();
var map_index = hash;
while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
const index = entry.value;
assert(index != .none); // key not present
if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) {
break; // we found the entry to replace
}
}
return .{ .new = .{
.ip = ip,
.tid = tid,
.shard = shard,
.map_index = map_index,
} };
}
pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
var gop = try ip.getOrPutKey(gpa, tid, key);
@@ -7859,6 +8039,10 @@ pub const UnionTypeInit = struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
declared_owned_captures: struct {
zir_index: TrackedInst.Index,
captures: CaptureValue.Slice,
},
reified: struct {
zir_index: TrackedInst.Index,
type_hash: u64,
@@ -7871,17 +8055,28 @@ pub fn getUnionType(
gpa: Allocator,
tid: Zcu.PerThread.Id,
ini: UnionTypeInit,
/// If it is known that there is an existing type with this key which is outdated,
/// this is passed as `true`, and the type is replaced with one at a fresh index.
replace_existing: bool,
) Allocator.Error!WipNamespaceType.Result {
var gop = try ip.getOrPutKey(gpa, tid, .{ .union_type = switch (ini.key) {
const key: Key = .{ .union_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.declared_owned_captures => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .owned = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = r.type_hash,
} },
} });
} };
var gop = if (replace_existing)
ip.putKeyReplace(tid, key)
else
try ip.getOrPutKey(gpa, tid, key);
defer gop.deinit();
if (gop == .existing) return .{ .existing = gop.existing };
@@ -7896,7 +8091,7 @@ pub fn getUnionType(
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
@@ -7905,7 +8100,10 @@ pub fn getUnionType(
const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{
.flags = .{
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
.any_captures = switch (ini.key) {
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
.reified => false,
},
.runtime_tag = ini.flags.runtime_tag,
.any_aligned_fields = ini.flags.any_aligned_fields,
.layout = ini.flags.layout,
@@ -7914,7 +8112,10 @@ pub fn getUnionType(
.assumed_runtime_bits = ini.flags.assumed_runtime_bits,
.assumed_pointer_aligned = ini.flags.assumed_pointer_aligned,
.alignment = ini.flags.alignment,
.is_reified = ini.key == .reified,
.is_reified = switch (ini.key) {
.declared, .declared_owned_captures => false,
.reified => true,
},
},
.fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
@@ -7938,6 +8139,10 @@ pub fn getUnionType(
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
},
.declared_owned_captures => |d| if (d.captures.len != 0) {
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
},
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
}
@@ -8035,6 +8240,10 @@ pub const StructTypeInit = struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
declared_owned_captures: struct {
zir_index: TrackedInst.Index,
captures: CaptureValue.Slice,
},
reified: struct {
zir_index: TrackedInst.Index,
type_hash: u64,
@@ -8047,17 +8256,28 @@ pub fn getStructType(
gpa: Allocator,
tid: Zcu.PerThread.Id,
ini: StructTypeInit,
/// If it is known that there is an existing type with this key which is outdated,
/// this is passed as `true`, and the type is replaced with one at a fresh index.
replace_existing: bool,
) Allocator.Error!WipNamespaceType.Result {
var gop = try ip.getOrPutKey(gpa, tid, .{ .struct_type = switch (ini.key) {
const key: Key = .{ .struct_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.declared_owned_captures => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .owned = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = r.type_hash,
} },
} });
} };
var gop = if (replace_existing)
ip.putKeyReplace(tid, key)
else
try ip.getOrPutKey(gpa, tid, key);
defer gop.deinit();
if (gop == .existing) return .{ .existing = gop.existing };
@@ -8080,7 +8300,7 @@ pub fn getStructType(
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
@@ -8096,10 +8316,16 @@ pub fn getStructType(
.backing_int_ty = .none,
.names_map = names_map,
.flags = .{
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
.any_captures = switch (ini.key) {
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
.reified => false,
},
.field_inits_wip = false,
.inits_resolved = ini.inits_resolved,
.is_reified = ini.key == .reified,
.is_reified = switch (ini.key) {
.declared, .declared_owned_captures => false,
.reified => true,
},
},
});
try items.append(.{
@@ -8111,6 +8337,10 @@ pub fn getStructType(
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
},
.declared_owned_captures => |d| if (d.captures.len != 0) {
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
},
.reified => |r| {
_ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
},
@@ -8138,7 +8368,7 @@ pub fn getStructType(
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
@@ -8153,7 +8383,10 @@ pub fn getStructType(
.fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
.flags = .{
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
.any_captures = switch (ini.key) {
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
.reified => false,
},
.is_extern = is_extern,
.known_non_opv = ini.known_non_opv,
.requires_comptime = ini.requires_comptime,
@@ -8171,7 +8404,10 @@ pub fn getStructType(
.field_inits_wip = false,
.inits_resolved = ini.inits_resolved,
.fully_resolved = false,
.is_reified = ini.key == .reified,
.is_reified = switch (ini.key) {
.declared, .declared_owned_captures => false,
.reified => true,
},
},
});
try items.append(.{
@@ -8183,6 +8419,10 @@ pub fn getStructType(
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
},
.declared_owned_captures => |d| if (d.captures.len != 0) {
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
},
.reified => |r| {
_ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
},
@@ -8986,6 +9226,10 @@ pub const EnumTypeInit = struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
declared_owned_captures: struct {
zir_index: TrackedInst.Index,
captures: CaptureValue.Slice,
},
reified: struct {
zir_index: TrackedInst.Index,
type_hash: u64,
@@ -9081,17 +9325,28 @@ pub fn getEnumType(
gpa: Allocator,
tid: Zcu.PerThread.Id,
ini: EnumTypeInit,
/// If it is known that there is an existing type with this key which is outdated,
/// this is passed as `true`, and the type is replaced with one at a fresh index.
replace_existing: bool,
) Allocator.Error!WipEnumType.Result {
var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = switch (ini.key) {
const key: Key = .{ .enum_type = switch (ini.key) {
.declared => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.declared_owned_captures => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .owned = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = r.type_hash,
} },
} });
} };
var gop = if (replace_existing)
ip.putKeyReplace(tid, key)
else
try ip.getOrPutKey(gpa, tid, key);
defer gop.deinit();
if (gop == .existing) return .{ .existing = gop.existing };
@@ -9110,7 +9365,7 @@ pub fn getEnumType(
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| d.captures.len,
inline .declared, .declared_owned_captures => |d| d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
@@ -9120,7 +9375,7 @@ pub fn getEnumType(
const extra_index = addExtraAssumeCapacity(extra, EnumAuto{
.name = undefined, // set by `prepare`
.captures_len = switch (ini.key) {
.declared => |d| @intCast(d.captures.len),
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
.reified => std.math.maxInt(u32),
},
.namespace = undefined, // set by `prepare`
@@ -9139,6 +9394,7 @@ pub fn getEnumType(
extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish`
switch (ini.key) {
.declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
.declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
}
const names_start = extra.mutate.len;
@@ -9169,7 +9425,7 @@ pub fn getEnumType(
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| d.captures.len,
inline .declared, .declared_owned_captures => |d| d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
@@ -9180,7 +9436,7 @@ pub fn getEnumType(
const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{
.name = undefined, // set by `prepare`
.captures_len = switch (ini.key) {
.declared => |d| @intCast(d.captures.len),
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
.reified => std.math.maxInt(u32),
},
.namespace = undefined, // set by `prepare`
@@ -9204,6 +9460,7 @@ pub fn getEnumType(
extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish`
switch (ini.key) {
.declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
.declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
}
const names_start = extra.mutate.len;
@@ -9267,10 +9524,12 @@ pub fn getGeneratedTagEnumType(
.tid = tid,
.index = items.mutate.len,
}, ip);
const parent_namespace = ip.namespacePtr(ini.parent_namespace);
const namespace = try ip.createNamespace(gpa, tid, .{
.parent = ini.parent_namespace.toOptional(),
.owner_type = enum_index,
.file_scope = ip.namespacePtr(ini.parent_namespace).file_scope,
.file_scope = parent_namespace.file_scope,
.generation = parent_namespace.generation,
});
errdefer ip.destroyNamespace(tid, namespace);
@@ -10866,6 +11125,7 @@ pub fn destroyNamespace(
.parent = undefined,
.file_scope = undefined,
.owner_type = undefined,
.generation = undefined,
};
@field(namespace, Local.namespace_next_free_field) =
@enumFromInt(local.mutate.namespaces.free_list);
@@ -11000,7 +11260,6 @@ pub fn getOrPutTrailingString(
shard.mutate.string_map.mutex.lock();
defer shard.mutate.string_map.mutex.unlock();
if (map.entries != shard.shared.string_map.entries) {
shard.mutate.string_map.len += 1;
map = shard.shared.string_map;
map_mask = map.header().mask();
map_index = hash;
+375 -266
View File
@@ -110,6 +110,12 @@ exports: std.ArrayListUnmanaged(Zcu.Export) = .{},
/// of data stored in `Zcu.all_references`. It exists to avoid adding references to
/// a given `AnalUnit` multiple times.
references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
/// All dependencies registered so far by this `Sema`. This is a temporary duplicate
/// of the main dependency data. It exists to avoid adding dependencies to a given
/// `AnalUnit` multiple times.
dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{},
const MaybeComptimeAlloc = struct {
/// The runtime index of the `alloc` instruction.
@@ -877,6 +883,8 @@ pub fn deinit(sema: *Sema) void {
sema.comptime_allocs.deinit(gpa);
sema.exports.deinit(gpa);
sema.references.deinit(gpa);
sema.type_references.deinit(gpa);
sema.dependencies.deinit(gpa);
sema.* = undefined;
}
@@ -999,7 +1007,7 @@ fn analyzeBodyInner(
// The hashmap lookup in here is a little expensive, and LLVM fails to optimize it away.
if (build_options.enable_logging) {
std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ sub_file_path: {
const file_index = block.src_base_inst.resolveFull(&zcu.intern_pool).file;
const file_index = block.src_base_inst.resolveFile(&zcu.intern_pool);
const file = zcu.fileByIndex(file_index);
break :sub_file_path file.sub_file_path;
}, inst });
@@ -2496,12 +2504,12 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
const mod = sema.pt.zcu;
if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
var all_references = mod.resolveReferences() catch @panic("out of memory");
var all_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?Zcu.ResolvedReference) = null;
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
wip_errors.init(gpa) catch @panic("out of memory");
Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch unreachable;
Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch @panic("out of memory");
std.debug.print("compile error during Sema:\n", .{});
var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
crash_report.compilerPanic("unexpected compile error occurred", null, null);
}
@@ -2715,33 +2723,6 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) {
return new;
}
/// Given a type just looked up in the `InternPool`, check whether it is
/// considered outdated on this update. If so, remove it from the pool
/// and return `true`.
fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
if (!zcu.comp.incremental) return false;
const cau_index = switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).cau.unwrap().?,
.union_type => ip.loadUnionType(ty).cau,
.enum_type => ip.loadEnumType(ty).cau.unwrap().?,
else => unreachable,
};
const cau_unit = AnalUnit.wrap(.{ .cau = cau_index });
const was_outdated = zcu.outdated.swapRemove(cau_unit) or
zcu.potentially_outdated.swapRemove(cau_unit);
if (!was_outdated) return false;
_ = zcu.outdated_ready.swapRemove(cau_unit);
zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit);
zcu.intern_pool.remove(pt.tid, ty);
try zcu.markDependeeOutdated(.{ .interned = ty });
return true;
}
fn zirStructDecl(
sema: *Sema,
block: *Block,
@@ -2807,10 +2788,17 @@ fn zirStructDecl(
.captures = captures,
} },
};
const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init)) {
.existing => |ty| wip: {
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
break :wip (try ip.getStructType(gpa, pt.tid, struct_init)).wip;
const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) {
.existing => |ty| {
const new_ty = try pt.ensureTypeUpToDate(ty, false);
// Make sure we update the namespace if the declaration is re-analyzed, to pick
// up on e.g. changed comptime decls.
try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
try sema.declareDependency(.{ .interned = new_ty });
try sema.addTypeReferenceEntry(src, new_ty);
return Air.internedToRef(new_ty);
},
.wip => |wip| wip,
});
@@ -2828,6 +2816,7 @@ fn zirStructDecl(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
errdefer pt.destroyNamespace(new_namespace_index);
@@ -2850,8 +2839,8 @@ fn zirStructDecl(
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}
@@ -2873,7 +2862,7 @@ fn createTypeName(
.anon => {}, // handled after switch
.parent => return block.type_name_ctx,
.func => func_strat: {
const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip));
const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail);
const zir_tags = sema.code.instructions.items(.tag);
var buf: std.ArrayListUnmanaged(u8) = .{};
@@ -2966,7 +2955,6 @@ fn zirEnumDecl(
const tracked_inst = try block.trackZir(inst);
const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) };
const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } };
const tag_type_ref = if (small.has_tag_type) blk: {
const tag_type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
@@ -3029,10 +3017,17 @@ fn zirEnumDecl(
.captures = captures,
} },
};
const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init)) {
.existing => |ty| wip: {
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
break :wip (try ip.getEnumType(gpa, pt.tid, enum_init)).wip;
const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) {
.existing => |ty| {
const new_ty = try pt.ensureTypeUpToDate(ty, false);
// Make sure we update the namespace if the declaration is re-analyzed, to pick
// up on e.g. changed comptime decls.
try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
try sema.declareDependency(.{ .interned = new_ty });
try sema.addTypeReferenceEntry(src, new_ty);
return Air.internedToRef(new_ty);
},
.wip => |wip| wip,
});
@@ -3056,167 +3051,38 @@ fn zirEnumDecl(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
errdefer if (!done) pt.destroyNamespace(new_namespace_index);
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
if (pt.zcu.comp.incremental) {
try mod.intern_pool.addDependency(
gpa,
AnalUnit.wrap(.{ .cau = new_cau_index }),
.{ .src_hash = try block.trackZir(inst) },
);
}
try pt.scanNamespace(new_namespace_index, decls);
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
// We've finished the initial construction of this type, and are about to perform analysis.
// Set the Cau and namespace appropriately, and don't destroy anything on failure.
wip_ty.prepare(ip, new_cau_index, new_namespace_index);
done = true;
const int_tag_ty = ty: {
// We create a block for the field type instructions because they
// may need to reference Decls from inside the enum namespace.
// Within the field type, default value, and alignment expressions, the owner should be the enum's `Cau`.
const prev_owner = sema.owner;
sema.owner = AnalUnit.wrap(.{ .cau = new_cau_index });
defer sema.owner = prev_owner;
const prev_func_index = sema.func_index;
sema.func_index = .none;
defer sema.func_index = prev_func_index;
var enum_block: Block = .{
.parent = null,
.sema = sema,
.namespace = new_namespace_index,
.instructions = .{},
.inlining = null,
.is_comptime = true,
.src_base_inst = tracked_inst,
.type_name_ctx = type_name,
};
defer enum_block.instructions.deinit(sema.gpa);
if (body.len != 0) {
_ = try sema.analyzeInlineBody(&enum_block, body, inst);
}
if (tag_type_ref != .none) {
const ty = try sema.resolveType(&enum_block, tag_ty_src, tag_type_ref);
if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)});
}
break :ty ty;
} else if (fields_len == 0) {
break :ty try pt.intType(.unsigned, 0);
} else {
const bits = std.math.log2_int_ceil(usize, fields_len);
break :ty try pt.intType(.unsigned, bits);
}
};
wip_ty.setTagTy(ip, int_tag_ty.toIntern());
if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) {
return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
}
}
var bit_bag_index: usize = body_end;
var cur_bit_bag: u32 = undefined;
var field_i: u32 = 0;
var last_tag_val: ?Value = null;
while (field_i < fields_len) : (field_i += 1) {
if (field_i % 32 == 0) {
cur_bit_bag = sema.code.extra[bit_bag_index];
bit_bag_index += 1;
}
const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
cur_bit_bag >>= 1;
const field_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
const field_name_zir = sema.code.nullTerminatedString(field_name_index);
extra_index += 2; // field name, doc comment
const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
const value_src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
.offset = .{ .container_field_value = field_i },
};
const tag_overflow = if (has_tag_value) overflow: {
const tag_val_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const tag_inst = try sema.resolveInst(tag_val_ref);
last_tag_val = try sema.resolveConstDefinedValue(block, .{
.base_node_inst = tracked_inst,
.offset = .{ .container_field_name = field_i },
}, tag_inst, .{
.needed_comptime_reason = "enum tag value must be comptime-known",
});
if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
assert(conflict.kind == .value); // AstGen validated names are unique
const other_field_src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
.offset = .{ .container_field_value = conflict.prev_field_idx },
};
const msg = msg: {
const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)});
errdefer msg.destroy(gpa);
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
break :overflow false;
} else if (any_values) overflow: {
var overflow: ?usize = null;
last_tag_val = if (last_tag_val) |val|
try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
else
try pt.intValue(int_tag_ty, 0);
if (overflow != null) break :overflow true;
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
assert(conflict.kind == .value); // AstGen validated names are unique
const other_field_src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
.offset = .{ .container_field_value = conflict.prev_field_idx },
};
const msg = msg: {
const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)});
errdefer msg.destroy(gpa);
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
break :overflow false;
} else overflow: {
assert(wip_ty.nextField(&mod.intern_pool, field_name, .none) == null);
last_tag_val = try pt.intValue(Type.comptime_int, field_i);
if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
break :overflow false;
};
if (tag_overflow) {
const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{
last_tag_val.?.fmtValueSema(pt, sema), int_tag_ty.fmt(pt),
});
return sema.failWithOwnedErrorMsg(block, msg);
}
}
try Sema.resolveDeclaredEnum(
pt,
wip_ty,
inst,
tracked_inst,
new_namespace_index,
type_name,
new_cau_index,
small,
body,
tag_type_ref,
any_values,
fields_len,
sema.code,
body_end,
);
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
@@ -3295,10 +3161,17 @@ fn zirUnionDecl(
.captures = captures,
} },
};
const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init)) {
.existing => |ty| wip: {
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
break :wip (try ip.getUnionType(gpa, pt.tid, union_init)).wip;
const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init, false)) {
.existing => |ty| {
const new_ty = try pt.ensureTypeUpToDate(ty, false);
// Make sure we update the namespace if the declaration is re-analyzed, to pick
// up on e.g. changed comptime decls.
try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
try sema.declareDependency(.{ .interned = new_ty });
try sema.addTypeReferenceEntry(src, new_ty);
return Air.internedToRef(new_ty);
},
.wip => |wip| wip,
});
@@ -3316,6 +3189,7 @@ fn zirUnionDecl(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
errdefer pt.destroyNamespace(new_namespace_index);
@@ -3325,7 +3199,7 @@ fn zirUnionDecl(
try mod.intern_pool.addDependency(
gpa,
AnalUnit.wrap(.{ .cau = new_cau_index }),
.{ .src_hash = try block.trackZir(inst) },
.{ .src_hash = tracked_inst },
);
}
@@ -3338,8 +3212,8 @@ fn zirUnionDecl(
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}
@@ -3387,8 +3261,15 @@ fn zirOpaqueDecl(
};
// No `wrapWipTy` needed as no std.builtin types are opaque.
const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) {
// No `maybeRemoveOutdatedType` as opaque types are never outdated.
.existing => |ty| return Air.internedToRef(ty),
.existing => |ty| {
// Make sure we update the namespace if the declaration is re-analyzed, to pick
// up on e.g. changed comptime decls.
try pt.ensureNamespaceUpToDate(Type.fromInterned(ty).getNamespaceIndex(mod));
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(src, ty);
return Air.internedToRef(ty);
},
.wip => |wip| wip,
};
errdefer wip_ty.cancel(ip, pt.tid);
@@ -3405,6 +3286,7 @@ fn zirOpaqueDecl(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
errdefer pt.destroyNamespace(new_namespace_index);
@@ -3416,6 +3298,7 @@ fn zirOpaqueDecl(
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addTypeReferenceEntry(src, wip_ty.index);
return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index));
}
@@ -5487,7 +5370,7 @@ fn failWithBadMemberAccess(
.Enum => "enum",
else => unreachable,
};
if (agg_ty.typeDeclInst(zcu)) |inst| if (inst.resolve(ip) == .main_struct_inst) {
if (agg_ty.typeDeclInst(zcu)) |inst| if ((inst.resolve(ip) orelse return error.AnalysisFail) == .main_struct_inst) {
return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{
agg_ty.fmt(pt), field_name.fmt(ip),
});
@@ -6041,15 +5924,17 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
const path_digest = zcu.filePathDigest(result.file_index);
const old_root_type = zcu.fileRootType(result.file_index);
pt.astGenFile(result.file, path_digest, old_root_type) catch |err|
pt.astGenFile(result.file, path_digest) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
// TODO: register some kind of dependency on the file.
// That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to
// trigger re-analysis later.
try pt.ensureFileAnalyzed(result.file_index);
return Air.internedToRef(zcu.fileRootType(result.file_index));
const ty = zcu.fileRootType(result.file_index);
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(src, ty);
return Air.internedToRef(ty);
}
fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6797,12 +6682,21 @@ fn lookupInNamespace(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
try pt.ensureNamespaceUpToDate(namespace_index);
const namespace = zcu.namespacePtr(namespace_index);
const adapter: Zcu.Namespace.NameAdapter = .{ .zcu = zcu };
const src_file = zcu.namespacePtr(block.namespace).file_scope;
if (Type.fromInterned(namespace.owner_type).typeDeclInst(zcu)) |type_decl_inst| {
try sema.declareDependency(.{ .namespace_name = .{
.namespace = type_decl_inst,
.name = ident_name,
} });
}
if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) {
const gpa = sema.gpa;
var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{};
@@ -7528,14 +7422,14 @@ fn analyzeCall(
operation: CallOperation,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const callee_ty = sema.typeOf(func);
const func_ty_info = mod.typeToFunc(func_ty).?;
const func_ty_info = zcu.typeToFunc(func_ty).?;
const cc = func_ty_info.cc;
if (try sema.resolveValue(func)) |func_val|
if (func_val.isUndef(mod))
if (func_val.isUndef(zcu))
return sema.failWithUseOfUndef(block, call_src);
if (cc == .Naked) {
const maybe_func_inst = try sema.funcDeclSrcInst(func);
@@ -7647,7 +7541,7 @@ fn analyzeCall(
.needed_comptime_reason = "function being called at comptime must be comptime-known",
.block_comptime_reason = comptime_reason,
});
const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
const module_fn_index = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) {
.@"extern" => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}),
@@ -7664,7 +7558,7 @@ fn analyzeCall(
},
else => {},
}
assert(callee_ty.isPtrAtRuntime(mod));
assert(callee_ty.isPtrAtRuntime(zcu));
return sema.fail(block, call_src, "{s} call of function pointer", .{
if (is_comptime_call) "comptime" else "inline",
});
@@ -7704,7 +7598,7 @@ fn analyzeCall(
},
};
const module_fn = mod.funcInfo(module_fn_index);
const module_fn = zcu.funcInfo(module_fn_index);
// This is not a function instance, so the function's `Nav` has a
// `Cau` -- we don't need to check `generic_owner`.
@@ -7718,7 +7612,7 @@ fn analyzeCall(
// whenever performing an operation where the difference matters.
var ics = InlineCallSema.init(
sema,
mod.cauFileScope(fn_cau_index).zir,
zcu.cauFileScope(fn_cau_index).zir,
module_fn_index,
block.error_return_trace_index,
);
@@ -7752,13 +7646,16 @@ fn analyzeCall(
// Whether this call should be memoized, set to false if the call can
// mutate comptime state.
var should_memoize = true;
// TODO: comptime call memoization is currently not supported under incremental compilation
// since dependencies are not marked on callers. If we want to keep this around (we should
// check that it's worthwhile first!), each memoized call needs a `Cau`.
var should_memoize = !zcu.comp.incremental;
// If it's a comptime function call, we need to memoize it as long as no external
// comptime memory is mutated.
const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
const owner_info = mod.typeToFunc(Type.fromInterned(module_fn.ty)).?;
const owner_info = zcu.typeToFunc(Type.fromInterned(module_fn.ty)).?;
const new_param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len);
var new_fn_info: InternPool.GetFuncTypeKey = .{
.param_types = new_param_types,
@@ -7778,7 +7675,7 @@ fn analyzeCall(
// the AIR instructions of the callsite. The callee could be a generic function
// which means its parameter type expressions must be resolved in order and used
// to successively coerce the arguments.
const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst.resolve(ip));
const fn_info = ics.callee().code.getFnInfo(module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail);
try ics.callee().inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
var arg_i: u32 = 0;
@@ -7823,7 +7720,7 @@ fn analyzeCall(
// each of the parameters, resolving the return type and providing it to the child
// `Sema` so that it can be used for the `ret_ptr` instruction.
const ret_ty_inst = if (fn_info.ret_ty_body.len != 0)
try sema.resolveInlineBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst.resolve(ip))
try sema.resolveInlineBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst.resolve(ip) orelse return error.AnalysisFail)
else
try sema.resolveInst(fn_info.ret_ty_ref);
const ret_ty_src: LazySrcLoc = .{ .base_node_inst = module_fn.zir_body_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 } };
@@ -7843,12 +7740,12 @@ fn analyzeCall(
// bug generating invalid LLVM IR.
const res2: Air.Inst.Ref = res2: {
if (should_memoize and is_comptime_call) {
if (mod.intern_pool.getIfExists(.{ .memoized_call = .{
if (zcu.intern_pool.getIfExists(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
.result = .none,
} })) |memoized_call_index| {
const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call;
const memoized_call = zcu.intern_pool.indexToKey(memoized_call_index).memoized_call;
break :res2 Air.internedToRef(memoized_call.result);
}
}
@@ -7907,7 +7804,7 @@ fn analyzeCall(
// a reference to `comptime_allocs` so is not stable across instances of `Sema`.
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(mod)) {
if (should_memoize and !Value.fromInterned(result_interned).canMutateComptimeVarState(zcu)) {
_ = try pt.intern(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
@@ -7946,7 +7843,7 @@ fn analyzeCall(
if (param_ty) |t| assert(!t.isGenericPoison());
arg_out.* = try args_info.analyzeArg(sema, block, arg_idx, param_ty, func_ty_info, func);
try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg_out.*);
if (sema.typeOf(arg_out.*).zigTypeTag(mod) == .NoReturn) {
if (sema.typeOf(arg_out.*).zigTypeTag(zcu) == .NoReturn) {
return arg_out.*;
}
}
@@ -7955,15 +7852,15 @@ fn analyzeCall(
switch (sema.owner.unwrap()) {
.cau => {},
.func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(mod)) {
.func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(zcu)) {
ip.funcSetCallsOrAwaitsErrorableFn(owner_func);
},
}
if (try sema.resolveValue(func)) |func_val| {
if (mod.intern_pool.isFuncBody(func_val.toIntern())) {
if (zcu.intern_pool.isFuncBody(func_val.toIntern())) {
try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = func_val.toIntern() }));
try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern());
try zcu.ensureFuncBodyAnalysisQueued(func_val.toIntern());
}
}
@@ -7990,7 +7887,7 @@ fn analyzeCall(
// Function pointers and extern functions aren't guaranteed to
// actually be noreturn so we add a safety check for them.
if (try sema.resolveValue(func)) |func_val| {
switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
switch (zcu.intern_pool.indexToKey(func_val.toIntern())) {
.func => break :skip_safety,
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| if (!ip.getNav(nav).isExtern(ip)) break :skip_safety,
@@ -8210,7 +8107,7 @@ fn instantiateGenericCall(
const fn_nav = ip.getNav(generic_owner_func.owner_nav);
const fn_cau = ip.getCau(fn_nav.analysis_owner.unwrap().?);
const fn_zir = zcu.namespacePtr(fn_cau.namespace).fileScope(zcu).zir;
const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip));
const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip) orelse return error.AnalysisFail);
const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count());
@memset(comptime_args, .none);
@@ -9416,7 +9313,7 @@ fn zirFunc(
break :cau generic_owner_nav.analysis_owner.unwrap().?;
} else sema.owner.unwrap().cau;
const fn_is_exported = exported: {
const decl_inst = ip.getCau(func_decl_cau).zir_index.resolve(ip);
const decl_inst = ip.getCau(func_decl_cau).zir_index.resolve(ip) orelse return error.AnalysisFail;
const zir_decl = sema.code.getDeclaration(decl_inst)[0];
break :exported zir_decl.flags.is_export;
};
@@ -13964,12 +13861,6 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
});
try sema.checkNamespaceType(block, lhs_src, container_type);
if (container_type.typeDeclInst(mod)) |type_decl_inst| {
try sema.declareDependency(.{ .namespace_name = .{
.namespace = type_decl_inst,
.name = decl_name,
} });
}
const namespace = container_type.getNamespace(mod).unwrap() orelse return .bool_false;
if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| {
@@ -14009,7 +13900,10 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
// That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to
// trigger re-analysis later.
try pt.ensureFileAnalyzed(result.file_index);
return Air.internedToRef(zcu.fileRootType(result.file_index));
const ty = zcu.fileRootType(result.file_index);
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(operand_src, ty);
return Air.internedToRef(ty);
}
fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -17673,7 +17567,13 @@ fn zirThis(
_ = extended;
const pt = sema.pt;
const namespace = pt.zcu.namespacePtr(block.namespace);
return Air.internedToRef(namespace.owner_type);
const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type, false);
switch (pt.zcu.intern_pool.indexToKey(new_ty)) {
.struct_type, .union_type, .enum_type => try sema.declareDependency(.{ .interned = new_ty }),
.opaque_type => {},
else => unreachable,
}
return Air.internedToRef(new_ty);
}
fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
@@ -17698,7 +17598,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const msg = msg: {
const name = name: {
// TODO: we should probably store this name in the ZIR to avoid this complexity.
const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod);
const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?;
const tree = file.getTree(sema.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
@@ -17726,7 +17626,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
if (!block.is_typeof and !block.is_comptime and sema.func_index != .none) {
const msg = msg: {
const name = name: {
const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod);
const file, const src_base_node = Module.LazySrcLoc.resolveBaseNode(block.src_base_inst, mod).?;
const tree = file.getTree(sema.gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
@@ -18975,6 +18875,7 @@ fn typeInfoNamespaceDecls(
const ip = &zcu.intern_pool;
const namespace_index = opt_namespace_index.unwrap() orelse return;
try pt.ensureNamespaceUpToDate(namespace_index);
const namespace = zcu.namespacePtr(namespace_index);
const gop = try seen_namespaces.getOrPut(namespace);
@@ -21821,7 +21722,10 @@ fn zirReify(
.zir_index = try block.trackZir(inst),
} },
})) {
.existing => |ty| return Air.internedToRef(ty),
.existing => |ty| {
try sema.addTypeReferenceEntry(src, ty);
return Air.internedToRef(ty);
},
.wip => |wip| wip,
};
errdefer wip_ty.cancel(ip, pt.tid);
@@ -21838,8 +21742,10 @@ fn zirReify(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
try sema.addTypeReferenceEntry(src, wip_ty.index);
return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index));
},
.Union => {
@@ -22019,11 +21925,16 @@ fn reifyEnum(
.zir_index = tracked_inst,
.type_hash = hasher.final(),
} },
})) {
}, false)) {
.wip => |wip| wip,
.existing => |ty| return Air.internedToRef(ty),
.existing => |ty| {
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(src, ty);
return Air.internedToRef(ty);
},
};
errdefer wip_ty.cancel(ip, pt.tid);
var done = false;
errdefer if (!done) wip_ty.cancel(ip, pt.tid);
if (tag_ty.zigTypeTag(mod) != .Int) {
return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
@@ -22041,12 +21952,16 @@ fn reifyEnum(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
wip_ty.prepare(ip, new_cau_index, new_namespace_index);
wip_ty.setTagTy(ip, tag_ty.toIntern());
done = true;
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(pt, field_idx);
@@ -22181,9 +22096,13 @@ fn reifyUnion(
.zir_index = tracked_inst,
.type_hash = hasher.final(),
} },
})) {
}, false)) {
.wip => |wip| wip,
.existing => |ty| return Air.internedToRef(ty),
.existing => |ty| {
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(src, ty);
return Air.internedToRef(ty);
},
};
errdefer wip_ty.cancel(ip, pt.tid);
@@ -22338,6 +22257,7 @@ fn reifyUnion(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
@@ -22348,7 +22268,8 @@ fn reifyUnion(
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}
@@ -22446,9 +22367,13 @@ fn reifyStruct(
.zir_index = tracked_inst,
.type_hash = hasher.final(),
} },
})) {
}, false)) {
.wip => |wip| wip,
.existing => |ty| return Air.internedToRef(ty),
.existing => |ty| {
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(src, ty);
return Air.internedToRef(ty);
},
};
errdefer wip_ty.cancel(ip, pt.tid);
@@ -22616,6 +22541,7 @@ fn reifyStruct(
.parent = block.namespace.toOptional(),
.owner_type = wip_ty.index,
.file_scope = block.getFileScopeIndex(mod),
.generation = mod.generation,
});
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
@@ -22626,7 +22552,8 @@ fn reifyStruct(
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
try sema.addTypeReferenceEntry(src, wip_ty.index);
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}
@@ -26125,7 +26052,7 @@ fn zirVarExtended(
const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = 0 });
const decl_inst, const decl_bodies = decl: {
const decl_inst = sema.getOwnerCauDeclInst().resolve(ip);
const decl_inst = sema.getOwnerCauDeclInst().resolve(ip) orelse return error.AnalysisFail;
const zir_decl, const extra_end = sema.code.getDeclaration(decl_inst);
break :decl .{ decl_inst, zir_decl.getBodies(extra_end, sema.code) };
};
@@ -26354,7 +26281,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :decl_inst cau.zir_index;
} else sema.getOwnerCauDeclInst(); // not an instantiation so we're analyzing a function declaration Cau
const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&mod.intern_pool))[0];
const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&mod.intern_pool) orelse return error.AnalysisFail)[0];
if (zir_decl.flags.is_export) {
break :cc .C;
}
@@ -27659,13 +27586,6 @@ fn fieldVal(
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
const child_type = val.toType();
if (child_type.typeDeclInst(mod)) |type_decl_inst| {
try sema.declareDependency(.{ .namespace_name = .{
.namespace = type_decl_inst,
.name = field_name,
} });
}
switch (try child_type.zigTypeTagOrPoison(mod)) {
.ErrorSet => {
switch (ip.indexToKey(child_type.toIntern())) {
@@ -27897,13 +27817,6 @@ fn fieldPtr(
const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
const child_type = val.toType();
if (child_type.typeDeclInst(mod)) |type_decl_inst| {
try sema.declareDependency(.{ .namespace_name = .{
.namespace = type_decl_inst,
.name = field_name,
} });
}
switch (child_type.zigTypeTag(mod)) {
.ErrorSet => {
switch (ip.indexToKey(child_type.toIntern())) {
@@ -32223,7 +32136,7 @@ fn addReferenceEntry(
referenced_unit: AnalUnit,
) !void {
const zcu = sema.pt.zcu;
if (zcu.comp.reference_trace == 0) return;
if (!zcu.comp.incremental and zcu.comp.reference_trace == 0) return;
const gop = try sema.references.getOrPut(sema.gpa, referenced_unit);
if (gop.found_existing) return;
// TODO: we need to figure out how to model inline calls here.
@@ -32232,6 +32145,18 @@ fn addReferenceEntry(
try zcu.addUnitReference(sema.owner, referenced_unit, src);
}
/// Record that the current analysis unit (`sema.owner`) referenced the container
/// type `referenced_type` at source location `src`. Used by incremental
/// compilation and by reference-trace error reporting; when neither is active,
/// this is a no-op. Each distinct type is recorded at most once per `Sema`.
fn addTypeReferenceEntry(
    sema: *Sema,
    src: LazySrcLoc,
    referenced_type: InternPool.Index,
) !void {
    const zcu = sema.pt.zcu;
    // Tracking is only needed for incremental updates or a nonzero reference trace.
    const tracking_needed = zcu.comp.incremental or zcu.comp.reference_trace != 0;
    if (!tracking_needed) return;
    const gop = try sema.type_references.getOrPut(sema.gpa, referenced_type);
    if (gop.found_existing) return; // already recorded for this unit
    try zcu.addTypeReference(sema.owner, referenced_type, src);
}
pub fn ensureNavResolved(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
@@ -35323,7 +35248,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
if (struct_type.haveLayout(ip))
return;
try ty.resolveFields(pt);
try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
if (struct_type.layout == .@"packed") {
semaBackingIntType(pt, struct_type) catch |err| switch (err) {
@@ -35505,7 +35430,7 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp
break :blk accumulator;
};
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail;
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
@@ -36120,7 +36045,7 @@ fn semaStructFields(
const cau_index = struct_type.cau.unwrap().?;
const namespace_index = ip.getCau(cau_index).namespace;
const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir;
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail;
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
@@ -36343,7 +36268,7 @@ fn semaStructFieldInits(
const cau_index = struct_type.cau.unwrap().?;
const namespace_index = ip.getCau(cau_index).namespace;
const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir;
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip);
const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail;
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa);
@@ -36477,7 +36402,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_ty: InternPool.Ind
const ip = &zcu.intern_pool;
const cau_index = union_type.cau;
const zir = zcu.namespacePtr(union_type.namespace).fileScope(zcu).zir;
const zir_index = union_type.zir_index.resolve(ip);
const zir_index = union_type.zir_index.resolve(ip) orelse return error.AnalysisFail;
const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended;
assert(extended.opcode == .union_decl);
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
@@ -36591,11 +36516,11 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_ty: InternPool.Ind
}
} else {
// The provided type is the enum tag type.
union_type.setTagType(ip, provided_ty.toIntern());
const enum_type = switch (ip.indexToKey(provided_ty.toIntern())) {
.enum_type => ip.loadEnumType(provided_ty.toIntern()),
else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{provided_ty.fmt(pt)}),
};
union_type.setTagType(ip, provided_ty.toIntern());
// The fields of the union must match the enum exactly.
// A flag per field is used to check for missing and extraneous fields.
explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
@@ -38223,6 +38148,9 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
const zcu = sema.pt.zcu;
if (!zcu.comp.incremental) return;
const gop = try sema.dependencies.getOrPut(sema.gpa, dependee);
if (gop.found_existing) return;
// Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields
// of a type and they use `@This()`. This dependency would be unnecessary, and in fact would
// just result in over-analysis since `Zcu.findOutdatedToAnalyze` would never be able to resolve
@@ -38446,6 +38374,187 @@ fn getOwnerFuncDeclInst(sema: *Sema) InternPool.TrackedInst.Index {
return ip.getCau(cau).zir_index;
}
/// Called as soon as a `declared` enum type is created.
/// Resolves the tag type and field inits.
/// Declares the `src_hash` dependency on the enum's ZIR declaration, so call sites need not do this.
pub fn resolveDeclaredEnum(
    pt: Zcu.PerThread,
    wip_ty: InternPool.WipEnumType,
    inst: Zir.Inst.Index,
    tracked_inst: InternPool.TrackedInst.Index,
    namespace: InternPool.NamespaceIndex,
    type_name: InternPool.NullTerminatedString,
    enum_cau: InternPool.Cau.Index,
    small: Zir.Inst.EnumDecl.Small,
    body: []const Zir.Inst.Index,
    tag_type_ref: Zir.Inst.Ref,
    any_values: bool,
    fields_len: u32,
    zir: Zir,
    body_end: usize,
) Zcu.CompileError!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    // ZIR packs one "has explicit tag value" flag per field, 32 flags per `extra` word.
    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;

    const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) };
    const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } };

    const anal_unit = AnalUnit.wrap(.{ .cau = enum_cau });

    var arena = std.heap.ArenaAllocator.init(gpa);
    defer arena.deinit();

    var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
    defer comptime_err_ret_trace.deinit();

    // A dedicated `Sema` owned by the enum's `Cau` is used, since tag type and field
    // values are resolved eagerly as part of the enum declaration itself.
    var sema: Sema = .{
        .pt = pt,
        .gpa = gpa,
        .arena = arena.allocator(),
        .code = zir,
        .owner = anal_unit,
        .func_index = .none,
        .func_is_naked = false,
        .fn_ret_ty = Type.void,
        .fn_ret_ty_ies = null,
        .comptime_err_ret_trace = &comptime_err_ret_trace,
    };
    defer sema.deinit();

    try sema.declareDependency(.{ .src_hash = tracked_inst });

    var block: Block = .{
        .parent = null,
        .sema = &sema,
        .namespace = namespace,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
        .src_base_inst = tracked_inst,
        .type_name_ctx = type_name,
    };
    defer block.instructions.deinit(gpa);

    const int_tag_ty = ty: {
        // Run the (possibly empty) declaration body so `tag_type_ref` can be resolved.
        if (body.len != 0) {
            _ = try sema.analyzeInlineBody(&block, body, inst);
        }

        if (tag_type_ref != .none) {
            // Explicit tag type: must be an integer (or comptime_int) type.
            const ty = try sema.resolveType(&block, tag_ty_src, tag_type_ref);
            if (ty.zigTypeTag(zcu) != .Int and ty.zigTypeTag(zcu) != .ComptimeInt) {
                return sema.fail(&block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)});
            }
            break :ty ty;
        } else if (fields_len == 0) {
            break :ty try pt.intType(.unsigned, 0);
        } else {
            // No explicit tag type: use the smallest unsigned integer that can
            // distinguish all fields.
            const bits = std.math.log2_int_ceil(usize, fields_len);
            break :ty try pt.intType(.unsigned, bits);
        }
    };
    wip_ty.setTagTy(ip, int_tag_ty.toIntern());

    // A non-exhaustive enum must leave at least one value of the tag type unnamed
    // (`comptime_int` tags are unbounded, so they are exempt).
    if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
        if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) {
            return sema.fail(&block, src, "non-exhaustive enum specifies every value", .{});
        }
    }

    // Walk the trailing ZIR data: the bit bags come first, then per-field data
    // (name, doc comment, and optionally the explicit tag value).
    var extra_index = body_end + bit_bags_count;
    var bit_bag_index: usize = body_end;
    var cur_bit_bag: u32 = undefined;
    var last_tag_val: ?Value = null;
    for (0..fields_len) |field_i_usize| {
        const field_i: u32 = @intCast(field_i_usize);
        if (field_i % 32 == 0) {
            // Refill the flag word every 32 fields.
            cur_bit_bag = zir.extra[bit_bag_index];
            bit_bag_index += 1;
        }
        const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
        cur_bit_bag >>= 1;

        const field_name_index: Zir.NullTerminatedString = @enumFromInt(zir.extra[extra_index]);
        const field_name_zir = zir.nullTerminatedString(field_name_index);
        extra_index += 2; // field name, doc comment

        const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);

        const value_src: LazySrcLoc = .{
            .base_node_inst = tracked_inst,
            .offset = .{ .container_field_value = field_i },
        };

        // `tag_overflow` is set when the field's tag value does not fit `int_tag_ty`;
        // the error is reported after the branch so all three cases share the message.
        const tag_overflow = if (has_tag_value) overflow: {
            // Explicit `= value`: resolve it at comptime and check it fits the tag type.
            const tag_val_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
            extra_index += 1;
            const tag_inst = try sema.resolveInst(tag_val_ref);
            last_tag_val = try sema.resolveConstDefinedValue(&block, .{
                .base_node_inst = tracked_inst,
                .offset = .{ .container_field_name = field_i },
            }, tag_inst, .{
                .needed_comptime_reason = "enum tag value must be comptime-known",
            });
            if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
            last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
            if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| {
                assert(conflict.kind == .value); // AstGen validated names are unique
                const other_field_src: LazySrcLoc = .{
                    .base_node_inst = tracked_inst,
                    .offset = .{ .container_field_value = conflict.prev_field_idx },
                };
                const msg = msg: {
                    const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(&block, msg);
            }
            break :overflow false;
        } else if (any_values) overflow: {
            // No explicit value on this field, but some field has one: continue
            // counting from the previous tag (or start at 0).
            var overflow: ?usize = null;
            last_tag_val = if (last_tag_val) |val|
                try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
            else
                try pt.intValue(int_tag_ty, 0);
            if (overflow != null) break :overflow true;
            if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| {
                assert(conflict.kind == .value); // AstGen validated names are unique
                const other_field_src: LazySrcLoc = .{
                    .base_node_inst = tracked_inst,
                    .offset = .{ .container_field_value = conflict.prev_field_idx },
                };
                const msg = msg: {
                    const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(other_field_src, msg, "other occurrence here", .{});
                    break :msg msg;
                };
                return sema.failWithOwnedErrorMsg(&block, msg);
            }
            break :overflow false;
        } else overflow: {
            // No field in this enum has an explicit value: tags are simply
            // 0..fields_len-1, so duplicates are impossible.
            assert(wip_ty.nextField(ip, field_name, .none) == null);
            last_tag_val = try pt.intValue(Type.comptime_int, field_i);
            if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
            last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
            break :overflow false;
        };
        if (tag_overflow) {
            const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{
                last_tag_val.?.fmtValueSema(pt, &sema), int_tag_ty.fmt(pt),
            });
            return sema.failWithOwnedErrorMsg(&block, msg);
        }
    }
}
pub const bitCastVal = @import("Sema/bitcast.zig").bitCast;
pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice;
+1 -1
View File
@@ -3437,7 +3437,7 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
},
else => return null,
};
const info = tracked.resolveFull(&zcu.intern_pool);
const info = tracked.resolveFull(&zcu.intern_pool) orelse return null;
const file = zcu.fileByIndex(info.file);
assert(file.zir_loaded);
const zir = file.zir;
+485 -135
View File
@@ -10,7 +10,7 @@ const builtin = @import("builtin");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.module);
const log = std.log.scoped(.zcu);
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
@@ -153,27 +153,27 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .
/// Maximum amount of distinct error values, set by --error-limit
error_limit: ErrorInt,
/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
/// Value is the number of PO dependencies of this AnalUnit.
/// This value will decrease as we perform semantic analysis to learn what is outdated.
/// If any of these PO deps is outdated, this value will be moved to `outdated`.
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
/// Value is the number of PO dependencies of this AnalUnit.
/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
/// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0.
/// Such `AnalUnit`s are ready for immediate re-analysis.
/// See `findOutdatedToAnalyze` for details.
outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
/// This contains a set of struct types whose corresponding `Cau` may not be in
/// `outdated`, but are the root types of files which have updated source and
/// thus must be re-analyzed. If such a type is only in this set, the struct type
/// index may be preserved (only the namespace might change). If its owned `Cau`
/// is also outdated, the struct type index must be recreated.
outdated_file_root: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
/// This contains a list of AnalUnit whose analysis or codegen failed, but the
/// failure was something like running out of disk space, and trying again may
/// succeed. On the next update, we will flush this list, marking all members of
/// it as outdated.
retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .{},
/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
analysis_roots: std.BoundedArray(*Package.Module, 3) = .{},
stage1_flags: packed struct {
have_winmain: bool = false,
have_wwinmain: bool = false,
@@ -192,7 +192,7 @@ global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .{}
/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_reference` of the first reference triggered by the unit.
/// Value is index into `all_references` of the first reference triggered by the unit.
/// The `next` field on the `Reference` forms a linked list of all references
/// triggered by the key `AnalUnit`.
reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
@@ -200,11 +200,23 @@ all_references: std.ArrayListUnmanaged(Reference) = .{},
/// Freelist of indices in `all_references`.
free_references: std.ArrayListUnmanaged(u32) = .{},
/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_type_references` of the first reference triggered by the unit.
/// The `next` field on the `TypeReference` forms a linked list of all type references
/// triggered by the key `AnalUnit`.
type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
all_type_references: std.ArrayListUnmanaged(TypeReference) = .{},
/// Freelist of indices in `all_type_references`.
free_type_references: std.ArrayListUnmanaged(u32) = .{},
panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId.len,
/// The panic function body.
panic_func_index: InternPool.Index = .none,
null_stack_trace: InternPool.Index = .none,
generation: u32 = 0,
pub const PerThread = @import("Zcu/PerThread.zig");
pub const PanicId = enum {
@@ -308,10 +320,21 @@ pub const Reference = struct {
src: LazySrcLoc,
};
/// Records that an `AnalUnit` referenced a container type; the type analogue of
/// `Reference`. Entries live in `all_type_references` and are chained per
/// referencing unit via `type_reference_table` and `next`.
pub const TypeReference = struct {
    /// The container type which was referenced.
    referenced: InternPool.Index,
    /// Index into `all_type_references` of the next `TypeReference` triggered by the same `AnalUnit`.
    /// `std.math.maxInt(u32)` is the sentinel.
    next: u32,
    /// The source location of the reference.
    src: LazySrcLoc,
};
/// The container that structs, enums, unions, and opaques have.
pub const Namespace = struct {
parent: OptionalIndex,
file_scope: File.Index,
generation: u32,
/// Will be a struct, enum, union, or opaque.
owner_type: InternPool.Index,
/// Members of the namespace which are marked `pub`.
@@ -2022,10 +2045,11 @@ pub const LazySrcLoc = struct {
.offset = .unneeded,
};
pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) struct { *File, Ast.Node.Index } {
/// Returns `null` if the ZIR instruction has been lost across incremental updates.
pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } {
const ip = &zcu.intern_pool;
const file_index, const zir_inst = inst: {
const info = base_node_inst.resolveFull(ip);
const info = base_node_inst.resolveFull(ip) orelse return null;
break :inst .{ info.file, info.inst };
};
const file = zcu.fileByIndex(file_index);
@@ -2051,7 +2075,15 @@ pub const LazySrcLoc = struct {
/// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`.
/// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates.
pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc {
const file, const base_node = resolveBaseNode(lazy.base_node_inst, zcu);
return lazy.upgradeOrLost(zcu).?;
}
/// Like `upgrade`, but returns `null` if the source location has been lost across incremental updates.
pub fn upgradeOrLost(lazy: LazySrcLoc, zcu: *Zcu) ?SrcLoc {
const file, const base_node: Ast.Node.Index = if (lazy.offset == .entire_file) .{
zcu.fileByIndex(lazy.base_node_inst.resolveFile(&zcu.intern_pool)),
0,
} else resolveBaseNode(lazy.base_node_inst, zcu) orelse return null;
return .{
.file_scope = file,
.base_node = base_node,
@@ -2148,7 +2180,6 @@ pub fn deinit(zcu: *Zcu) void {
zcu.potentially_outdated.deinit(gpa);
zcu.outdated.deinit(gpa);
zcu.outdated_ready.deinit(gpa);
zcu.outdated_file_root.deinit(gpa);
zcu.retryable_failures.deinit(gpa);
zcu.test_functions.deinit(gpa);
@@ -2162,6 +2193,10 @@ pub fn deinit(zcu: *Zcu) void {
zcu.all_references.deinit(gpa);
zcu.free_references.deinit(gpa);
zcu.type_reference_table.deinit(gpa);
zcu.all_type_references.deinit(gpa);
zcu.free_type_references.deinit(gpa);
zcu.intern_pool.deinit(gpa);
}
@@ -2255,55 +2290,89 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F
return zir;
}
pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void {
log.debug("outdated dependee: {}", .{dependee});
pub fn markDependeeOutdated(
zcu: *Zcu,
/// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependencies as PO.
/// However, when we discover during analysis that something was outdated, the `Dependee` was already
/// marked as PO, so we need to decrement the PO dep count for each depender.
marked_po: enum { not_marked_po, marked_po },
dependee: InternPool.Dependee,
) !void {
log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.contains(depender)) {
// We do not need to increment the PO dep count, as if the outdated
// dependee is a Decl, we had already marked this as PO.
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
switch (marked_po) {
.not_marked_po => {},
.marked_po => {
po_dep_count.* -= 1;
log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
},
}
continue;
}
const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender);
const new_po_dep_count = switch (marked_po) {
.not_marked_po => if (opt_po_entry) |e| e.value else 0,
.marked_po => if (opt_po_entry) |e| e.value - 1 else {
// This `AnalUnit` has already been re-analyzed this update, and registered a dependency
// on this thing, but already has sufficiently up-to-date information. Nothing to do.
continue;
},
};
try zcu.outdated.putNoClobber(
zcu.gpa,
depender,
// We do not need to increment this count for the same reason as above.
if (opt_po_entry) |e| e.value else 0,
new_po_dep_count,
);
log.debug("outdated: {}", .{depender});
if (opt_po_entry == null) {
// This is a new entry with no PO dependencies.
log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
if (new_po_dep_count == 0) {
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
// If this is a Decl and was not previously PO, we must recursively
// mark dependencies on its tyval as PO.
if (opt_po_entry == null) {
assert(marked_po == .not_marked_po);
try zcu.markTransitiveDependersPotentiallyOutdated(depender);
}
}
}
pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
// This depender is already outdated, but it now has one
// less PO dependency!
po_dep_count.* -= 1;
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
continue;
}
// This depender is definitely at least PO, because this Decl was just analyzed
// due to being outdated.
const ptr = zcu.potentially_outdated.getPtr(depender).?;
const ptr = zcu.potentially_outdated.getPtr(depender) orelse {
// This dependency has been registered during in-progress analysis, but the unit is
// not in `potentially_outdated` because analysis is in-progress. Nothing to do.
continue;
};
if (ptr.* > 1) {
ptr.* -= 1;
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
continue;
}
log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
// This dependency is no longer PO, i.e. is known to be up-to-date.
assert(zcu.potentially_outdated.swapRemove(depender));
// If this is a Decl, we must recursively mark dependencies on its tyval
@@ -2323,14 +2392,16 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
/// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES.
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void {
const ip = &zcu.intern_pool;
var it = ip.dependencyIterator(switch (maybe_outdated.unwrap()) {
const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) {
.cau => |cau| switch (ip.getCau(cau).owner.unwrap()) {
.nav => |nav| .{ .nav_val = nav }, // TODO: also `nav_ref` deps when introduced
.none, .type => return, // analysis of this `Cau` can't outdate any dependencies
.type => |ty| .{ .interned = ty },
.none => return, // analysis of this `Cau` can't outdate any dependencies
},
.func => |func_index| .{ .interned = func_index }, // IES
});
};
log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)});
var it = ip.dependencyIterator(dependee);
while (it.next()) |po| {
if (zcu.outdated.getPtr(po)) |po_dep_count| {
// This dependency is already outdated, but it now has one more PO
@@ -2339,14 +2410,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
_ = zcu.outdated_ready.swapRemove(po);
}
po_dep_count.* += 1;
log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
continue;
}
if (zcu.potentially_outdated.getPtr(po)) |n| {
// There is now one more PO dependency.
n.* += 1;
log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
continue;
}
try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
// This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
try zcu.markTransitiveDependersPotentiallyOutdated(po);
}
@@ -2355,9 +2429,11 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
if (!zcu.comp.incremental) return null;
if (true) @panic("TODO: findOutdatedToAnalyze");
if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) {
if (zcu.outdated.count() == 0) {
// Any units in `potentially_outdated` must just be stuck in loops with one another: none of those
// units have had any outdated dependencies so far, and all of their remaining PO deps are triggered
// by other units in `potentially_outdated`. So, we can safely assume those units are up-to-date.
zcu.potentially_outdated.clearRetainingCapacity();
log.debug("findOutdatedToAnalyze: no outdated depender", .{});
return null;
}
@@ -2372,96 +2448,75 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
// In this case, we must defer to more complex logic below.
if (zcu.outdated_ready.count() > 0) {
log.debug("findOutdatedToAnalyze: trivial '{s} {d}'", .{
@tagName(zcu.outdated_ready.keys()[0].unwrap()),
switch (zcu.outdated_ready.keys()[0].unwrap()) {
inline else => |x| @intFromEnum(x),
},
});
return zcu.outdated_ready.keys()[0];
const unit = zcu.outdated_ready.keys()[0];
log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)});
return unit;
}
// Next, we will see if there is any outdated file root which was not in
// `outdated`. This set will be small (number of files changed in this
// update), so it's alright for us to just iterate here.
for (zcu.outdated_file_root.keys()) |file_decl| {
const decl_depender = AnalUnit.wrap(.{ .decl = file_decl });
if (zcu.outdated.contains(decl_depender)) {
// Since we didn't hit this in the first loop, this Decl must have
// pending dependencies, so is ineligible.
continue;
}
if (zcu.potentially_outdated.contains(decl_depender)) {
// This Decl's struct may or may not need to be recreated depending
// on whether it is outdated. If we analyzed it now, we would have
// to assume it was outdated and recreate it!
continue;
}
log.debug("findOutdatedToAnalyze: outdated file root decl '{d}'", .{file_decl});
return decl_depender;
}
// There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some
// Cau with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of
// A or B. We should select a Cau, since a Cau is definitely responsible for the loop in the
// dependency graph (since IES dependencies can't have loops). We should also, of course, not
// select a Cau owned by a `comptime` declaration, since you can't depend on those!
// There is no single AnalUnit which is ready for re-analysis. Instead, we
// must assume that some Decl with PO dependencies is outdated - e.g. in the
// above example we arbitrarily pick one of A or B. We should select a Decl,
// since a Decl is definitely responsible for the loop in the dependency
// graph (since you can't depend on a runtime function analysis!).
// The choice of this Decl could have a big impact on how much total
// analysis we perform, since if analysis concludes its tyval is unchanged,
// then other PO AnalUnit may be resolved as up-to-date. To hopefully avoid
// doing too much work, let's find a Decl which the most things depend on -
// the idea is that this will resolve a lot of loops (but this is only a
// heuristic).
// The choice of this Cau could have a big impact on how much total analysis we perform, since
// if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnit
// may be resolved as up-to-date. To hopefully avoid doing too much work, let's find a Decl
// which the most things depend on - the idea is that this will resolve a lot of loops (but this
// is only a heuristic).
log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{
zcu.outdated.count(),
zcu.potentially_outdated.count(),
});
const Decl = {};
const ip = &zcu.intern_pool;
var chosen_decl_idx: ?Decl.Index = null;
var chosen_decl_dependers: u32 = undefined;
var chosen_cau: ?InternPool.Cau.Index = null;
var chosen_cau_dependers: u32 = undefined;
for (zcu.outdated.keys()) |depender| {
const decl_index = switch (depender.unwrap()) {
.decl => |d| d,
.func => continue,
};
inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| {
for (outdated_units) |unit| {
const cau = switch (unit.unwrap()) {
.cau => |cau| cau,
.func => continue, // a `func` definitely can't be causing the loop so it is a bad choice
};
const cau_owner = ip.getCau(cau).owner;
var n: u32 = 0;
var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index });
while (it.next()) |_| n += 1;
var n: u32 = 0;
var it = ip.dependencyIterator(switch (cau_owner.unwrap()) {
.none => continue, // there can be no dependencies on this `Cau` so it is a terrible choice
.type => |ty| .{ .interned = ty },
.nav => |nav| .{ .nav_val = nav },
});
while (it.next()) |_| n += 1;
if (chosen_decl_idx == null or n > chosen_decl_dependers) {
chosen_decl_idx = decl_index;
chosen_decl_dependers = n;
if (chosen_cau == null or n > chosen_cau_dependers) {
chosen_cau = cau;
chosen_cau_dependers = n;
}
}
}
for (zcu.potentially_outdated.keys()) |depender| {
const decl_index = switch (depender.unwrap()) {
.decl => |d| d,
.func => continue,
};
var n: u32 = 0;
var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index });
while (it.next()) |_| n += 1;
if (chosen_decl_idx == null or n > chosen_decl_dependers) {
chosen_decl_idx = decl_index;
chosen_decl_dependers = n;
if (chosen_cau == null) {
for (zcu.outdated.keys(), zcu.outdated.values()) |o, opod| {
const func = o.unwrap().func;
const nav = zcu.funcInfo(func).owner_nav;
std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {};
}
for (zcu.potentially_outdated.keys(), zcu.potentially_outdated.values()) |o, opod| {
const func = o.unwrap().func;
const nav = zcu.funcInfo(func).owner_nav;
std.io.getStdErr().writer().print("po: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {};
}
}
log.debug("findOutdatedToAnalyze: heuristic returned Decl {d} ({d} dependers)", .{
chosen_decl_idx.?,
chosen_decl_dependers,
log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{
zcu.fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? })),
chosen_cau_dependers,
});
return AnalUnit.wrap(.{ .decl = chosen_decl_idx.? });
return AnalUnit.wrap(.{ .cau = chosen_cau.? });
}
/// During an incremental update, before semantic analysis, call this to flush all values from
@@ -2506,10 +2561,10 @@ pub fn mapOldZirToNew(
});
// Used as temporary buffers for namespace declaration instructions
var old_decls = std.ArrayList(Zir.Inst.Index).init(gpa);
defer old_decls.deinit();
var new_decls = std.ArrayList(Zir.Inst.Index).init(gpa);
defer new_decls.deinit();
var old_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
defer old_decls.deinit(gpa);
var new_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
defer new_decls.deinit(gpa);
while (match_stack.popOrNull()) |match_item| {
// Match the namespace declaration itself
@@ -2583,7 +2638,7 @@ pub fn mapOldZirToNew(
break :inst unnamed_tests.items[unnamed_test_idx];
},
_ => inst: {
const name_nts = new_decl.name.toString(old_zir).?;
const name_nts = new_decl.name.toString(new_zir).?;
const name = new_zir.nullTerminatedString(name_nts);
if (new_decl.name.isNamedTest(new_zir)) {
break :inst named_tests.get(name) orelse continue;
@@ -2596,11 +2651,11 @@ pub fn mapOldZirToNew(
// Match the `declaration` instruction
try inst_map.put(gpa, old_decl_inst, new_decl_inst);
// Find namespace declarations within this declaration
try old_zir.findDecls(&old_decls, old_decl_inst);
try new_zir.findDecls(&new_decls, new_decl_inst);
// Find container type declarations within this declaration
try old_zir.findDecls(gpa, &old_decls, old_decl_inst);
try new_zir.findDecls(gpa, &new_decls, new_decl_inst);
// We don't have any smart way of matching up these namespace declarations, so we always
// We don't have any smart way of matching up these type declarations, so we always
// correlate them based on source order.
const n = @min(old_decls.items.len, new_decls.items.len);
try match_stack.ensureUnusedCapacity(gpa, n);
@@ -2699,16 +2754,32 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
const gpa = zcu.gpa;
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return;
var idx = kv.value;
unit_refs: {
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs;
var idx = kv.value;
while (idx != std.math.maxInt(u32)) {
zcu.free_references.append(gpa, idx) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
return;
};
idx = zcu.all_references.items[idx].next;
while (idx != std.math.maxInt(u32)) {
zcu.free_references.append(gpa, idx) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
break :unit_refs;
};
idx = zcu.all_references.items[idx].next;
}
}
type_refs: {
const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse break :type_refs;
var idx = kv.value;
while (idx != std.math.maxInt(u32)) {
zcu.free_type_references.append(gpa, idx) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
break :type_refs;
};
idx = zcu.all_type_references.items[idx].next;
}
}
}
@@ -2735,6 +2806,29 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
gop.value_ptr.* = @intCast(ref_idx);
}
/// Records that `src_unit` references the type `referenced_type`, with the reference
/// originating at `ref_src`. References are stored as singly-linked lists threaded
/// through `zcu.all_type_references`; `zcu.type_reference_table` maps each unit to
/// the head index of its list, and `std.math.maxInt(u32)` marks the end of a list.
pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void {
    const gpa = zcu.gpa;
    // Reserve table capacity up front so that once a list slot is claimed below,
    // no further operation can fail.
    try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1);
    // Reuse a previously-freed slot if available; otherwise grow the backing array.
    const ref_idx = zcu.free_type_references.popOrNull() orelse idx: {
        _ = try zcu.all_type_references.addOne(gpa);
        break :idx zcu.all_type_references.items.len - 1;
    };
    // Compile-time proof that no error can occur past this point: all fallible
    // work (allocation) happened above.
    errdefer comptime unreachable;
    const gop = zcu.type_reference_table.getOrPutAssumeCapacity(src_unit);
    // Prepend the new reference to the unit's list; if the unit had no list yet,
    // the sentinel maxInt(u32) terminates it.
    zcu.all_type_references.items[ref_idx] = .{
        .referenced = referenced_type,
        .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
        .src = ref_src,
    };
    gop.value_ptr.* = @intCast(ref_idx);
}
/// Number of bits needed to represent any error in this module's error set:
/// `log2(error_limit) + 1`, or 0 when `error_limit` is 0 (no errors exist).
pub fn errorSetBits(mod: *Zcu) u16 {
    const limit = mod.error_limit;
    if (limit == 0) return 0;
    const log2_val: u16 = std.math.log2_int(ErrorInt, limit);
    return log2_val + 1;
}
@@ -3029,28 +3123,215 @@ pub const ResolvedReference = struct {
};
/// Returns a mapping from an `AnalUnit` to where it is referenced.
/// TODO: in future, this must be adapted to traverse from roots of analysis. That way, we can
/// use the returned map to determine which units have become unreferenced in an incremental update.
pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) {
/// If the value is `null`, the `AnalUnit` is a root of analysis.
/// If an `AnalUnit` is not in the returned map, it is unreferenced.
pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
const gpa = zcu.gpa;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
var result: std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) = .{};
var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{};
errdefer result.deinit(gpa);
var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .{};
var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{};
defer {
checked_types.deinit(gpa);
type_queue.deinit(gpa);
unit_queue.deinit(gpa);
}
// This is not a sufficient size, but a lower bound.
try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));
for (zcu.reference_table.keys(), zcu.reference_table.values()) |referencer, first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_references.items[ref_idx];
const gop = try result.getOrPut(gpa, ref.referenced);
if (!gop.found_existing) {
gop.value_ptr.* = .{ .referencer = referencer, .src = ref.src };
try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
for (zcu.analysis_roots.slice()) |mod| {
// Logic ripped from `Zcu.PerThread.importPkg`.
// TODO: this is silly, `Module` should just store a reference to its root `File`.
const resolved_path = try std.fs.path.resolve(gpa, &.{
mod.root.root_dir.path orelse ".",
mod.root.sub_path,
mod.root_src_path,
});
defer gpa.free(resolved_path);
const file = zcu.import_table.get(resolved_path).?;
const root_ty = zcu.fileRootType(file);
if (root_ty == .none) continue;
type_queue.putAssumeCapacityNoClobber(root_ty, null);
}
while (true) {
if (type_queue.popOrNull()) |kv| {
const ty = kv.key;
const referencer = kv.value;
try checked_types.putNoClobber(gpa, ty, {});
log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
// If this type has a `Cau` for resolution, it's automatically referenced.
const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).cau,
.union_type => ip.loadUnionType(ty).cau.toOptional(),
.enum_type => ip.loadEnumType(ty).cau,
.opaque_type => .none,
else => unreachable,
};
if (resolution_cau.unwrap()) |cau| {
// this should only be referenced by the type
const unit = AnalUnit.wrap(.{ .cau = cau });
assert(!result.contains(unit));
try unit_queue.putNoClobber(gpa, unit, referencer);
}
ref_idx = ref.next;
// If this is a union with a generated tag, its tag type is automatically referenced.
// We don't add this reference for non-generated tags, as those will already be referenced via the union's `Cau`, with a better source location.
if (zcu.typeToUnion(Type.fromInterned(ty))) |union_obj| {
const tag_ty = union_obj.enum_tag_ty;
if (tag_ty != .none) {
if (ip.indexToKey(tag_ty).enum_type == .generated_tag) {
if (!checked_types.contains(tag_ty)) {
try type_queue.put(gpa, tag_ty, referencer);
}
}
}
}
// Queue any decls within this type which would be automatically analyzed.
// Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`.
const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?;
for (zcu.namespacePtr(ns).other_decls.items) |cau| {
// These are `comptime` and `test` declarations.
// `comptime` decls are always analyzed; `test` declarations are analyzed depending on the test filter.
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
// If the file failed AstGen, the TrackedInst refers to the old ZIR.
const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
const declaration = zir.getDeclaration(inst_info.inst)[0];
const want_analysis = switch (declaration.name) {
.@"usingnamespace" => unreachable,
.@"comptime" => true,
else => a: {
if (!comp.config.is_test) break :a false;
if (file.mod != zcu.main_mod) break :a false;
if (declaration.name.isNamedTest(zir) or declaration.name == .decltest) {
const nav = ip.getCau(cau).owner.unwrap().nav;
const fqn_slice = ip.getNav(nav).fqn.toSlice(ip);
for (comp.test_filters) |test_filter| {
if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break;
} else break :a false;
}
break :a true;
},
};
if (want_analysis) {
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) {
log.debug("type '{}': ref cau %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
try unit_queue.put(gpa, unit, referencer);
}
}
}
for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
// If the file failed AstGen, the TrackedInst refers to the old ZIR.
const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
const declaration = zir.getDeclaration(inst_info.inst)[0];
if (declaration.flags.is_export) {
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) {
log.debug("type '{}': ref cau %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
try unit_queue.put(gpa, unit, referencer);
}
}
}
for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
// If the file failed AstGen, the TrackedInst refers to the old ZIR.
const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
const declaration = zir.getDeclaration(inst_info.inst)[0];
if (declaration.flags.is_export) {
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) {
log.debug("type '{}': ref cau %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
try unit_queue.put(gpa, unit, referencer);
}
}
}
// Incremental compilation does not support `usingnamespace`.
// These are only included to keep good reference traces in non-incremental updates.
for (zcu.namespacePtr(ns).pub_usingnamespace.items) |nav| {
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
}
for (zcu.namespacePtr(ns).priv_usingnamespace.items) |nav| {
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
}
continue;
}
if (unit_queue.popOrNull()) |kv| {
const unit = kv.key;
try result.putNoClobber(gpa, unit, kv.value);
log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)});
if (zcu.reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_references.items[ref_idx];
if (!result.contains(ref.referenced)) {
log.debug("unit '{}': ref unit '{}'", .{
zcu.fmtAnalUnit(unit),
zcu.fmtAnalUnit(ref.referenced),
});
try unit_queue.put(gpa, ref.referenced, .{
.referencer = unit,
.src = ref.src,
});
}
ref_idx = ref.next;
}
}
if (zcu.type_reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_type_references.items[ref_idx];
if (!checked_types.contains(ref.referenced)) {
log.debug("unit '{}': ref type '{}'", .{
zcu.fmtAnalUnit(unit),
Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
});
try type_queue.put(gpa, ref.referenced, .{
.referencer = unit,
.src = ref.src,
});
}
ref_idx = ref.next;
}
}
continue;
}
break;
}
return result;
@@ -3093,7 +3374,7 @@ pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc {
pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 {
const ip = &zcu.intern_pool;
const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip);
const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?;
const zir = zcu.fileByIndex(inst_info.file).zir;
const inst = zir.instructions.get(@intFromEnum(inst_info.inst));
assert(inst.tag == .declaration);
@@ -3106,7 +3387,7 @@ pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value {
pub fn navFileScopeIndex(zcu: *Zcu, nav: InternPool.Nav.Index) File.Index {
const ip = &zcu.intern_pool;
return ip.getNav(nav).srcInst(ip).resolveFull(ip).file;
return ip.getNav(nav).srcInst(ip).resolveFile(ip);
}
pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
@@ -3115,6 +3396,75 @@ pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File {
const ip = &zcu.intern_pool;
const file_index = ip.getCau(cau).zir_index.resolveFull(ip).file;
const file_index = ip.getCau(cau).zir_index.resolveFile(ip);
return zcu.fileByIndex(file_index);
}
/// Returns a `std.fmt.Formatter` that prints a human-readable description of
/// `unit` (see `formatAnalUnit`). Intended for logging and error reporting.
pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(formatAnalUnit) {
    return .{ .data = .{ .unit = unit, .zcu = zcu } };
}
/// Returns a `std.fmt.Formatter` that prints a human-readable description of
/// the dependee `d` (see `formatDependee`). Intended for logging and error reporting.
pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDependee) {
    return .{ .data = .{ .dependee = d, .zcu = zcu } };
}
/// Formatter implementation backing `fmtAnalUnit`. A `cau` is described by its
/// owner: a declaration's fully-qualified name, the owning container type's name,
/// or (for an unowned `Cau`) the file path and ZIR instruction it was created from.
/// A `func` is described by its owner Nav's fully-qualified name.
fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
    _ = .{ fmt, options };
    const zcu = data.zcu;
    const ip = &zcu.intern_pool;
    switch (data.unit.unwrap()) {
        .cau => |cau_index| {
            const cau = ip.getCau(cau_index);
            switch (cau.owner.unwrap()) {
                .nav => |nav| return writer.print("cau(decl='{}')", .{ip.getNav(nav).fqn.fmt(ip)}),
                .type => |ty| return writer.print("cau(ty='{}')", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)}),
                // Unowned `Cau`: identify it by source location if the tracked ZIR
                // instruction still resolves; otherwise report it as lost.
                .none => if (cau.zir_index.resolveFull(ip)) |resolved| {
                    const file_path = zcu.fileByIndex(resolved.file).sub_file_path;
                    return writer.print("cau(inst=('{s}', %{}))", .{ file_path, @intFromEnum(resolved.inst) });
                } else {
                    return writer.writeAll("cau(inst=<lost>)");
                },
            }
        },
        .func => |func| {
            const nav = zcu.funcInfo(func).owner_nav;
            return writer.print("func('{}')", .{ip.getNav(nav).fqn.fmt(ip)});
        },
    }
}
/// Formatter implementation backing `fmtDependee`. Prints each dependee kind in
/// a compact, log-friendly form; tracked instructions that can no longer be
/// resolved (e.g. after the tracked source was deleted) are shown as `<lost>`.
fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
    _ = .{ fmt, options };
    const zcu = data.zcu;
    const ip = &zcu.intern_pool;
    switch (data.dependee) {
        // Source-hash dependency: identified by file path and ZIR instruction.
        .src_hash => |ti| {
            const info = ti.resolveFull(ip) orelse {
                return writer.writeAll("inst(<lost>)");
            };
            const file_path = zcu.fileByIndex(info.file).sub_file_path;
            return writer.print("inst('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
        },
        // Dependency on a named declaration's value.
        .nav_val => |nav| {
            const fqn = ip.getNav(nav).fqn;
            return writer.print("nav('{}')", .{fqn.fmt(ip)});
        },
        // Dependency on an interned value: either a container type, or a
        // function's inferred error set (printed via its owner's name).
        .interned => |ip_index| switch (ip.indexToKey(ip_index)) {
            .struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
            .func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
            else => unreachable,
        },
        // Dependency on a namespace's full contents.
        .namespace => |ti| {
            const info = ti.resolveFull(ip) orelse {
                return writer.writeAll("namespace(<lost>)");
            };
            const file_path = zcu.fileByIndex(info.file).sub_file_path;
            return writer.print("namespace('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
        },
        // Dependency on a single name within a namespace.
        .namespace_name => |k| {
            const info = k.namespace.resolveFull(ip) orelse {
                return writer.print("namespace(<lost>, '{}')", .{k.name.fmt(ip)});
            };
            const file_path = zcu.fileByIndex(info.file).sub_file_path;
            return writer.print("namespace('{s}', %{d}, '{}')", .{ file_path, @intFromEnum(info.inst), k.name.fmt(ip) });
        },
    }
}
+901 -321
View File
@@ -1,3 +1,6 @@
//! This type provides a wrapper around a `*Zcu` for uses which require a thread `Id`.
//! Any operation which mutates `InternPool` state lives here rather than on `Zcu`.
zcu: *Zcu,
/// Dense, per-thread unique index.
@@ -39,7 +42,6 @@ pub fn astGenFile(
pt: Zcu.PerThread,
file: *Zcu.File,
path_digest: Cache.BinDigest,
old_root_type: InternPool.Index,
) !void {
dev.check(.ast_gen);
assert(!file.mod.isBuiltin());
@@ -299,25 +301,15 @@ pub fn astGenFile(
file.status = .astgen_failure;
return error.AnalysisFail;
}
if (old_root_type != .none) {
// The root of this file must be re-analyzed, since the file has changed.
comp.mutex.lock();
defer comp.mutex.unlock();
log.debug("outdated file root type: {}", .{old_root_type});
try zcu.outdated_file_root.put(gpa, old_root_type, {});
}
}
const UpdatedFile = struct {
file_index: Zcu.File.Index,
file: *Zcu.File,
inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
};
fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.ArrayListUnmanaged(UpdatedFile)) void {
for (updated_files.items) |*elem| elem.inst_map.deinit(gpa);
fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile)) void {
for (updated_files.values()) |*elem| elem.inst_map.deinit(gpa);
updated_files.deinit(gpa);
}
@@ -328,143 +320,166 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
const gpa = zcu.gpa;
// We need to visit every updated File for every TrackedInst in InternPool.
var updated_files: std.ArrayListUnmanaged(UpdatedFile) = .{};
var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .{};
defer cleanupUpdatedFiles(gpa, &updated_files);
for (zcu.import_table.values()) |file_index| {
const file = zcu.fileByIndex(file_index);
const old_zir = file.prev_zir orelse continue;
const new_zir = file.zir;
try updated_files.append(gpa, .{
.file_index = file_index,
const gop = try updated_files.getOrPut(gpa, file_index);
assert(!gop.found_existing);
gop.value_ptr.* = .{
.file = file,
.inst_map = .{},
});
const inst_map = &updated_files.items[updated_files.items.len - 1].inst_map;
try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, inst_map);
};
if (!new_zir.hasCompileErrors()) {
try Zcu.mapOldZirToNew(gpa, old_zir.*, file.zir, &gop.value_ptr.inst_map);
}
}
if (updated_files.items.len == 0)
if (updated_files.count() == 0)
return;
for (ip.locals, 0..) |*local, tid| {
const tracked_insts_list = local.getMutableTrackedInsts(gpa);
for (tracked_insts_list.view().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| {
for (updated_files.items) |updated_file| {
const file_index = updated_file.file_index;
if (tracked_inst.file != file_index) continue;
for (tracked_insts_list.viewAllowEmpty().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| {
const file_index = tracked_inst.file;
const updated_file = updated_files.get(file_index) orelse continue;
const file = updated_file.file;
const old_zir = file.prev_zir.?.*;
const new_zir = file.zir;
const old_tag = old_zir.instructions.items(.tag);
const old_data = old_zir.instructions.items(.data);
const inst_map = &updated_file.inst_map;
const file = updated_file.file;
const old_inst = tracked_inst.inst;
const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{
.tid = @enumFromInt(tid),
.index = @intCast(tracked_inst_unwrapped_index),
}).wrap(ip);
tracked_inst.inst = inst_map.get(old_inst) orelse {
// Tracking failed for this instruction. Invalidate associated `src_hash` deps.
log.debug("tracking failed for %{d}", .{old_inst});
try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
continue;
};
if (file.zir.hasCompileErrors()) {
// If we mark this as outdated now, users of this inst will just get a transitive analysis failure.
// Ultimately, they would end up throwing out potentially useful analysis results.
// So, do nothing. We already have the file failure -- that's sufficient for now!
continue;
}
const old_inst = tracked_inst.inst.unwrap() orelse continue; // we can't continue tracking lost insts
const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{
.tid = @enumFromInt(tid),
.index = @intCast(tracked_inst_unwrapped_index),
}).wrap(ip);
const new_inst = updated_file.inst_map.get(old_inst) orelse {
// Tracking failed for this instruction. Invalidate associated `src_hash` deps.
log.debug("tracking failed for %{d}", .{old_inst});
tracked_inst.inst = .lost;
try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index });
continue;
};
tracked_inst.inst = InternPool.TrackedInst.MaybeLost.ZirIndex.wrap(new_inst);
if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
if (new_zir.getAssociatedSrcHash(tracked_inst.inst)) |new_hash| {
if (std.zig.srcHashEql(old_hash, new_hash)) {
break :hash_changed;
}
log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{
old_inst,
tracked_inst.inst,
std.fmt.fmtSliceHexLower(&old_hash),
std.fmt.fmtSliceHexLower(&new_hash),
});
const old_zir = file.prev_zir.?.*;
const new_zir = file.zir;
const old_tag = old_zir.instructions.items(.tag);
const old_data = old_zir.instructions.items(.data);
if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
if (new_zir.getAssociatedSrcHash(new_inst)) |new_hash| {
if (std.zig.srcHashEql(old_hash, new_hash)) {
break :hash_changed;
}
// The source hash associated with this instruction changed - invalidate relevant dependencies.
try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{
old_inst,
new_inst,
std.fmt.fmtSliceHexLower(&old_hash),
std.fmt.fmtSliceHexLower(&new_hash),
});
}
// The source hash associated with this instruction changed - invalidate relevant dependencies.
try zcu.markDependeeOutdated(.not_marked_po, .{ .src_hash = tracked_inst_index });
}
// If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
.extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
.struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
else => false,
},
// If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
.extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
.struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
else => false,
};
if (!has_namespace) continue;
},
else => false,
};
if (!has_namespace) continue;
var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer old_names.deinit(zcu.gpa);
{
var it = old_zir.declIterator(old_inst);
while (it.next()) |decl_inst| {
const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(old_zir)) continue,
}
const name_zir = decl_name.toString(old_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
try old_names.put(zcu.gpa, name_ip, {});
var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer old_names.deinit(zcu.gpa);
{
var it = old_zir.declIterator(old_inst);
while (it.next()) |decl_inst| {
const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(old_zir)) continue,
}
const name_zir = decl_name.toString(old_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
try old_names.put(zcu.gpa, name_ip, {});
}
var any_change = false;
{
var it = new_zir.declIterator(tracked_inst.inst);
while (it.next()) |decl_inst| {
const decl_name = new_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(new_zir)) continue,
}
const name_zir = decl_name.toString(new_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
new_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
if (!old_names.swapRemove(name_ip)) continue;
// Name added
any_change = true;
try zcu.markDependeeOutdated(.{ .namespace_name = .{
.namespace = tracked_inst_index,
.name = name_ip,
} });
}
var any_change = false;
{
var it = new_zir.declIterator(new_inst);
while (it.next()) |decl_inst| {
const decl_name = new_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(new_zir)) continue,
}
}
// The only elements remaining in `old_names` now are any names which were removed.
for (old_names.keys()) |name_ip| {
const name_zir = decl_name.toString(new_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
new_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
if (old_names.swapRemove(name_ip)) continue;
// Name added
any_change = true;
try zcu.markDependeeOutdated(.{ .namespace_name = .{
try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{
.namespace = tracked_inst_index,
.name = name_ip,
} });
}
}
// The only elements remaining in `old_names` now are any names which were removed.
for (old_names.keys()) |name_ip| {
any_change = true;
try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace_name = .{
.namespace = tracked_inst_index,
.name = name_ip,
} });
}
if (any_change) {
try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index });
}
if (any_change) {
try zcu.markDependeeOutdated(.not_marked_po, .{ .namespace = tracked_inst_index });
}
}
}
for (updated_files.items) |updated_file| {
try ip.rehashTrackedInsts(gpa, pt.tid);
for (updated_files.keys(), updated_files.values()) |file_index, updated_file| {
const file = updated_file.file;
const prev_zir = file.prev_zir.?;
file.prev_zir = null;
prev_zir.deinit(gpa);
gpa.destroy(prev_zir);
if (file.zir.hasCompileErrors()) {
// Keep `prev_zir` around: it's the last non-error ZIR.
// Don't update the namespace, as we have no new data to update *to*.
} else {
const prev_zir = file.prev_zir.?;
file.prev_zir = null;
prev_zir.deinit(gpa);
gpa.destroy(prev_zir);
// For every file which has changed, re-scan the namespace of the file's root struct type.
// These types are special-cased because they don't have an enclosing declaration which will
// be re-analyzed (causing the struct's namespace to be re-scanned). It's fine to do this
// now because this work is fast (no actual Sema work is happening, we're just updating the
// namespace contents). We must do this after updating ZIR refs above, since `scanNamespace`
// will track some instructions.
try pt.updateFileNamespace(file_index);
}
}
}
@@ -473,8 +488,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
const file_root_type = pt.zcu.fileRootType(file_index);
if (file_root_type != .none) {
const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?;
return pt.ensureCauAnalyzed(file_root_type_cau);
_ = try pt.ensureTypeUpToDate(file_root_type, false);
} else {
return pt.semaFile(file_index);
}
@@ -491,9 +505,8 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index });
const anal_unit = AnalUnit.wrap(.{ .cau = cau_index });
const cau = ip.getCau(cau_index);
const inst_info = cau.zir_index.resolveFull(ip);
log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)});
@@ -514,37 +527,7 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
if (cau_outdated) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
}
// TODO: this only works if namespace lookups in Sema trigger `ensureCauAnalyzed`, because
// `outdated_file_root` information is not "viral", so we need that a namespace lookup first
// handles the case where the file root is not an outdated *type* but does have an outdated
// *namespace*. A more logically simple alternative may be for a file's root struct to register
// a dependency on the file's entire source code (hash). Alternatively, we could make sure that
// these are always handled first in an update. Actually, that's probably the best option.
// For my own benefit, here's how a namespace update for a normal (non-file-root) type works:
// `const S = struct { ... };`
// We are adding or removing a declaration within this `struct`.
// * `S` registers a dependency on `.{ .src_hash = (declaration of S) }`
// * Any change to the `struct` body -- including changing a declaration -- invalidates this
// * `S` is re-analyzed, but notes:
// * there is an existing struct instance (at this `TrackedInst` with these captures)
// * the struct's `Cau` is up-to-date (because nothing about the fields changed)
// * so, it uses the same `struct`
// * but this doesn't stop it from updating the namespace!
// * we basically do `scanDecls`, updating the namespace as needed
// * TODO: optimize this to make sure we only do it once a generation i guess?
// * so everyone lived happily ever after
const file_root_outdated = switch (cau.owner.unwrap()) {
.type => |ty| zcu.outdated_file_root.swapRemove(ty),
.nav, .none => false,
};
if (zcu.fileByIndex(inst_info.file).status != .success_zir) {
return error.AnalysisFail;
}
if (!cau_outdated and !file_root_outdated) {
} else {
// We can trust the current information about this `Cau`.
if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) {
return error.AnalysisFail;
@@ -560,6 +543,97 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
}
}
const sema_result: SemaCauResult, const analysis_fail = if (pt.ensureCauAnalyzedInner(cau_index, cau_outdated)) |result|
.{ result, false }
else |err| switch (err) {
error.AnalysisFail => res: {
if (!zcu.failed_analysis.contains(anal_unit)) {
// If this `Cau` caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
}
// We treat errors as up-to-date, since those uses would just trigger a transitive error.
// The exception is types, since type declarations may require re-analysis if the type, e.g. its captures, changed.
const outdated = cau.owner.unwrap() == .type;
break :res .{ .{
.invalidate_decl_val = outdated,
.invalidate_decl_ref = outdated,
}, true };
},
error.OutOfMemory => res: {
try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
try zcu.retryable_failures.ensureUnusedCapacity(gpa, 1);
const msg = try Zcu.ErrorMsg.create(
gpa,
.{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) },
"unable to analyze: OutOfMemory",
.{},
);
zcu.retryable_failures.appendAssumeCapacity(anal_unit);
zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, msg);
// We treat errors as up-to-date, since those uses would just trigger a transitive error
break :res .{ .{
.invalidate_decl_val = false,
.invalidate_decl_ref = false,
}, true };
},
};
if (cau_outdated) {
// TODO: we do not yet have separate dependencies for decl values vs types.
const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref;
const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) {
.none => return, // there are no dependencies on a `comptime` decl!
.nav => |nav_index| .{ .nav_val = nav_index },
.type => |ty| .{ .interned = ty },
};
if (invalidate) {
// This dependency was marked as PO, meaning dependees were waiting
// on its analysis result, and it has turned out to be outdated.
// Update dependees accordingly.
try zcu.markDependeeOutdated(.marked_po, dependee);
} else {
// This dependency was previously PO, but turned out to be up-to-date.
// We do not need to queue successive analysis.
try zcu.markPoDependeeUpToDate(dependee);
}
}
if (analysis_fail) return error.AnalysisFail;
}
fn ensureCauAnalyzedInner(
pt: Zcu.PerThread,
cau_index: InternPool.Cau.Index,
cau_outdated: bool,
) Zcu.SemaError!SemaCauResult {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const cau = ip.getCau(cau_index);
const anal_unit = AnalUnit.wrap(.{ .cau = cau_index });
const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
// TODO: document this elsewhere mlugg!
// For my own benefit, here's how a namespace update for a normal (non-file-root) type works:
// `const S = struct { ... };`
// We are adding or removing a declaration within this `struct`.
// * `S` registers a dependency on `.{ .src_hash = (declaration of S) }`
// * Any change to the `struct` body -- including changing a declaration -- invalidates this
// * `S` is re-analyzed, but notes:
// * there is an existing struct instance (at this `TrackedInst` with these captures)
// * the struct's `Cau` is up-to-date (because nothing about the fields changed)
// * so, it uses the same `struct`
// * but this doesn't stop it from updating the namespace!
// * we basically do `scanDecls`, updating the namespace as needed
// * so everyone lived happily ever after
if (zcu.fileByIndex(inst_info.file).status != .success_zir) {
return error.AnalysisFail;
}
// `cau_outdated` can be true in the initial update for `comptime` declarations,
// so this isn't a `dev.check`.
if (cau_outdated and dev.env.supports(.incremental)) {
@@ -567,73 +641,23 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
// prior to re-analysis.
zcu.deleteUnitExports(anal_unit);
zcu.deleteUnitReferences(anal_unit);
}
const sema_result: SemaCauResult = res: {
if (inst_info.inst == .main_struct_inst) {
const changed = try pt.semaFileUpdate(inst_info.file, cau_outdated);
break :res .{
.invalidate_decl_val = changed,
.invalidate_decl_ref = changed,
};
if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
kv.value.destroy(zcu.gpa);
}
const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) {
.nav => |nav| ip.getNav(nav).fqn.toSlice(ip),
.type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.none => "comptime",
}, 0);
defer decl_prog_node.end();
break :res pt.semaCau(cau_index) catch |err| switch (err) {
error.AnalysisFail => {
if (!zcu.failed_analysis.contains(anal_unit)) {
// If this `Cau` caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
}
return error.AnalysisFail;
},
error.GenericPoison => unreachable,
error.ComptimeBreak => unreachable,
error.ComptimeReturn => unreachable,
error.OutOfMemory => {
try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
try zcu.retryable_failures.append(gpa, anal_unit);
zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, try Zcu.ErrorMsg.create(
gpa,
.{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) },
"unable to analyze: OutOfMemory",
.{},
));
return error.AnalysisFail;
},
};
};
if (!cau_outdated) {
// We definitely don't need to do any dependency tracking, so our work is done.
return;
_ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
}
// TODO: we do not yet have separate dependencies for decl values vs types.
const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref;
const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) {
.none => return, // there are no dependencies on a `comptime` decl!
.nav => |nav_index| .{ .nav_val = nav_index },
.type => |ty| .{ .interned = ty },
};
const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) {
.nav => |nav| ip.getNav(nav).fqn.toSlice(ip),
.type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.none => "comptime",
}, 0);
defer decl_prog_node.end();
if (invalidate) {
// This dependency was marked as PO, meaning dependees were waiting
// on its analysis result, and it has turned out to be outdated.
// Update dependees accordingly.
try zcu.markDependeeOutdated(dependee);
} else {
// This dependency was previously PO, but turned out to be up-to-date.
// We do not need to queue successive analysis.
try zcu.markPoDependeeUpToDate(dependee);
}
return pt.semaCau(cau_index) catch |err| switch (err) {
error.GenericPoison, error.ComptimeBreak, error.ComptimeReturn => unreachable,
error.AnalysisFail, error.OutOfMemory => |e| return e,
};
}
pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: InternPool.Index) Zcu.SemaError!void {
@@ -653,6 +677,63 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)});
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
const func_outdated = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
if (func_outdated) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
} else {
// We can trust the current information about this function.
if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) {
return error.AnalysisFail;
}
switch (func.analysisUnordered(ip).state) {
.unreferenced => {}, // this is the first reference
.queued => {}, // we're waiting on first-time analysis
.analyzed => return, // up-to-date
}
}
const ies_outdated, const analysis_fail = if (pt.ensureFuncBodyAnalyzedInner(func_index, func_outdated)) |result|
.{ result.ies_outdated, false }
else |err| switch (err) {
error.AnalysisFail => res: {
if (!zcu.failed_analysis.contains(anal_unit)) {
// If this function caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
}
break :res .{ false, true }; // we treat errors as up-to-date IES, since those uses would just trigger a transitive error
},
error.OutOfMemory => return error.OutOfMemory, // TODO: graceful handling like `ensureCauAnalyzed`
};
if (func_outdated) {
if (ies_outdated) {
log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)});
try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index });
} else {
log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)});
try zcu.markPoDependeeUpToDate(.{ .interned = func_index });
}
}
if (analysis_fail) return error.AnalysisFail;
}
fn ensureFuncBodyAnalyzedInner(
pt: Zcu.PerThread,
func_index: InternPool.Index,
func_outdated: bool,
) Zcu.SemaError!struct { ies_outdated: bool } {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const func = zcu.funcInfo(func_index);
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
// Here's an interesting question: is this function actually valid?
// Maybe the signature changed, so we'll end up creating a whole different `func`
// in the InternPool, and this one is a waste of time to analyze. Worse, we'd be
@@ -672,8 +753,10 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
});
if (ip.isRemoved(func_index) or (func.generic_owner != .none and ip.isRemoved(func.generic_owner))) {
try zcu.markDependeeOutdated(.{ .interned = func_index }); // IES
ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
if (func_outdated) {
try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); // IES
}
ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index }));
ip.remove(pt.tid, func_index);
@panic("TODO: remove orphaned function from binary");
}
@@ -685,15 +768,14 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
else
.none;
const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
const func_outdated = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
if (func_outdated) {
dev.check(.incremental);
_ = zcu.outdated_ready.swapRemove(anal_unit);
zcu.deleteUnitExports(anal_unit);
zcu.deleteUnitReferences(anal_unit);
if (zcu.failed_analysis.fetchSwapRemove(anal_unit)) |kv| {
kv.value.destroy(gpa);
}
_ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
}
if (!func_outdated) {
@@ -704,7 +786,7 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
switch (func.analysisUnordered(ip).state) {
.unreferenced => {}, // this is the first reference
.queued => {}, // we're waiting on first-time analysis
.analyzed => return, // up-to-date
.analyzed => return .{ .ies_outdated = false }, // up-to-date
}
}
@@ -713,28 +795,11 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
if (func_outdated) "outdated" else "never analyzed",
});
var air = pt.analyzeFnBody(func_index) catch |err| switch (err) {
error.AnalysisFail => {
if (!zcu.failed_analysis.contains(anal_unit)) {
// If this function caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
}
return error.AnalysisFail;
},
error.OutOfMemory => return error.OutOfMemory,
};
var air = try pt.analyzeFnBody(func_index);
errdefer air.deinit(gpa);
if (func_outdated) {
if (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies) {
log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)});
try zcu.markDependeeOutdated(.{ .interned = func_index });
} else {
log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)});
try zcu.markPoDependeeUpToDate(.{ .interned = func_index });
}
}
const ies_outdated = func_outdated and
(!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies);
const comp = zcu.comp;
@@ -743,13 +808,15 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
if (comp.bin_file == null and zcu.llvm_object == null and !dump_air and !dump_llvm_ir) {
air.deinit(gpa);
return;
return .{ .ies_outdated = ies_outdated };
}
try comp.queueJob(.{ .codegen_func = .{
.func = func_index,
.air = air,
} });
return .{ .ies_outdated = ies_outdated };
}
/// Takes ownership of `air`, even on error.
@@ -824,7 +891,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
"unable to codegen: {s}",
.{@errorName(err)},
));
try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
},
};
} else if (zcu.llvm_object) |llvm_object| {
@@ -848,6 +915,7 @@ fn createFileRootStruct(
pt: Zcu.PerThread,
file_index: Zcu.File.Index,
namespace_index: Zcu.Namespace.Index,
replace_existing: bool,
) Allocator.Error!InternPool.Index {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -891,7 +959,7 @@ fn createFileRootStruct(
.zir_index = tracked_inst,
.captures = &.{},
} },
})) {
}, replace_existing)) {
.existing => unreachable, // we wouldn't be analysing the file root if this type existed
.wip => |wip| wip,
};
@@ -904,7 +972,7 @@ fn createFileRootStruct(
if (zcu.comp.incremental) {
try ip.addDependency(
gpa,
InternPool.AnalUnit.wrap(.{ .cau = new_cau_index }),
AnalUnit.wrap(.{ .cau = new_cau_index }),
.{ .src_hash = tracked_inst },
);
}
@@ -920,66 +988,42 @@ fn createFileRootStruct(
return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
}
/// Re-analyze the root type of a file on an incremental update.
/// If `type_outdated`, the struct type itself is considered outdated and is
/// reconstructed at a new InternPool index. Otherwise, the namespace is just
/// re-analyzed. Returns whether the decl's tyval was invalidated.
/// Returns `error.AnalysisFail` if the file has an error.
fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: bool) Zcu.SemaError!bool {
/// Re-scan the namespace of a file's root struct type on an incremental update.
/// The file must have successfully populated ZIR.
/// If the file's root struct type is not populated (the file is unreferenced), nothing is done.
/// This is called by `updateZirRefs` for all updated files before the main work loop.
/// This function does not perform any semantic analysis.
fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.Error!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const file = zcu.fileByIndex(file_index);
assert(file.status == .success_zir);
const file_root_type = zcu.fileRootType(file_index);
const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu);
if (file_root_type == .none) return;
assert(file_root_type != .none);
log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{
log.debug("updateFileNamespace mod={s} sub_file_path={s}", .{
file.mod.fully_qualified_name,
file.sub_file_path,
type_outdated,
});
if (file.status != .success_zir) {
return error.AnalysisFail;
}
const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu);
const decls = decls: {
const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
if (type_outdated) {
// Invalidate the existing type, reusing its namespace.
const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?;
ip.removeDependenciesForDepender(
zcu.gpa,
InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }),
);
ip.remove(pt.tid, file_root_type);
_ = try pt.createFileRootStruct(file_index, namespace_index);
return true;
}
// Only the struct's namespace is outdated.
// Preserve the type - just scan the namespace again.
const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
const decls = file.zir.bodySlice(extra_index, decls_len);
if (!type_outdated) {
try pt.scanNamespace(namespace_index, decls);
}
return false;
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
break :decls file.zir.bodySlice(extra_index, decls_len);
};
try pt.scanNamespace(namespace_index, decls);
zcu.namespacePtr(namespace_index).generation = zcu.generation;
}
/// Regardless of the file status, will create a `Decl` if none exists so that we can track
/// dependencies and re-analyze when the file becomes outdated.
fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
const tracy = trace(@src());
defer tracy.end();
@@ -998,8 +1042,9 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
.parent = .none,
.owner_type = undefined, // set in `createFileRootStruct`
.file_scope = file_index,
.generation = zcu.generation,
});
const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index);
const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false);
errdefer zcu.intern_pool.remove(pt.tid, struct_ty);
switch (zcu.comp.cache_use) {
@@ -1049,10 +1094,10 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index });
const anal_unit = AnalUnit.wrap(.{ .cau = cau_index });
const cau = ip.getCau(cau_index);
const inst_info = cau.zir_index.resolveFull(ip);
const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
const zir = file.zir;
@@ -1071,9 +1116,10 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
},
.type => |ty| {
// This is an incremental update, and this type is being re-analyzed because it is outdated.
// The type must be recreated at a new `InternPool.Index`.
// Remove it from the InternPool and mark it outdated so that creation sites are re-analyzed.
ip.remove(pt.tid, ty);
// Create a new type in its place, and mark the old one as outdated so that use sites will
// be re-analyzed and discover an up-to-date type.
const new_ty = try pt.ensureTypeUpToDate(ty, true);
assert(new_ty != ty);
return .{
.invalidate_decl_val = true,
.invalidate_decl_ref = true,
@@ -1919,21 +1965,25 @@ const ScanDeclIter = struct {
.@"comptime" => cau: {
const cau = existing_cau orelse try ip.createComptimeCau(gpa, pt.tid, tracked_inst, namespace_index);
// For a `comptime` declaration, whether to re-analyze is based solely on whether the
// `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already.
const unit = InternPool.AnalUnit.wrap(.{ .cau = cau });
if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value);
if (kv.value == 0) { // no PO deps
try namespace.other_decls.append(gpa, cau);
if (existing_cau == null) {
// For a `comptime` declaration, whether to analyze is based solely on whether the
// `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already.
const unit = AnalUnit.wrap(.{ .cau = cau });
if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value);
if (kv.value == 0) { // no PO deps
zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
}
} else if (!zcu.outdated.contains(unit)) {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, 0);
zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
}
} else if (!zcu.outdated.contains(unit)) {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, 0);
zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
}
break :cau .{ cau, true };
@@ -1951,6 +2001,9 @@ const ScanDeclIter = struct {
const want_analysis = switch (kind) {
.@"comptime" => unreachable,
.@"usingnamespace" => a: {
if (comp.incremental) {
@panic("'usingnamespace' is not supported by incremental compilation");
}
if (declaration.flags.is_pub) {
try namespace.pub_usingnamespace.append(gpa, nav);
} else {
@@ -1989,7 +2042,7 @@ const ScanDeclIter = struct {
},
};
if (want_analysis or declaration.flags.is_export) {
if (existing_cau == null and (want_analysis or declaration.flags.is_export)) {
log.debug(
"scanDecl queue analyze_cau file='{s}' cau_index={d}",
.{ namespace.fileScope(zcu).sub_file_path, cau },
@@ -2009,9 +2062,9 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
const func = zcu.funcInfo(func_index);
const inst_info = func.zir_body_inst.resolveFull(ip);
const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
const zir = file.zir;
@@ -2097,7 +2150,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!
};
defer inner_block.instructions.deinit(gpa);
const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip));
const fn_info = sema.code.getFnInfo(func.zirBodyInstUnordered(ip).resolve(ip) orelse return error.AnalysisFail);
// Here we are performing "runtime semantic analysis" for a function body, which means
// we must map the parameter ZIR instructions to `arg` AIR instructions.
@@ -2395,7 +2448,7 @@ fn processExportsInner(
const nav = ip.getNav(nav_index);
if (zcu.failed_codegen.contains(nav_index)) break :failed true;
if (nav.analysis_owner.unwrap()) |cau| {
const cau_unit = InternPool.AnalUnit.wrap(.{ .cau = cau });
const cau_unit = AnalUnit.wrap(.{ .cau = cau });
if (zcu.failed_analysis.contains(cau_unit)) break :failed true;
if (zcu.transitive_failed_analysis.contains(cau_unit)) break :failed true;
}
@@ -2405,7 +2458,7 @@ fn processExportsInner(
};
// If the value is a function, we also need to check if that function succeeded analysis.
if (val.typeOf(zcu).zigTypeTag(zcu) == .Fn) {
const func_unit = InternPool.AnalUnit.wrap(.{ .func = val.toIntern() });
const func_unit = AnalUnit.wrap(.{ .func = val.toIntern() });
if (zcu.failed_analysis.contains(func_unit)) break :failed true;
if (zcu.transitive_failed_analysis.contains(func_unit)) break :failed true;
}
@@ -2580,7 +2633,7 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void
.{@errorName(err)},
));
if (nav.analysis_owner.unwrap()) |cau| {
try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .cau = cau }));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .cau = cau }));
} else {
// TODO: we don't have a way to indicate that this failure is retryable!
// Since these are really rare, we could as a cop-out retry the whole build next update.
@@ -2693,7 +2746,7 @@ pub fn reportRetryableFileError(
gop.value_ptr.* = err_msg;
}
/// Shortcut for calling `intern_pool.get`.
pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index {
    const zcu = pt.zcu;
    return zcu.intern_pool.get(zcu.gpa, pt.tid, key);
}
@@ -3278,6 +3331,532 @@ pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPo
return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt);
}
/// Given a container type requiring resolution, ensures that it is up-to-date.
/// If not, the type is recreated at a new `InternPool.Index`.
/// The new index is returned. This is the same as the old index if the fields were up-to-date.
/// If `already_updating` is set, assumes the type is already outdated and undergoing re-analysis rather than checking `zcu.outdated`.
///
/// A type is considered outdated when the `AnalUnit` of its owner `Cau` is in
/// `zcu.outdated` or `zcu.potentially_outdated`; in that case, dependees that
/// were waiting on this type (marked PO) are marked fully outdated before the
/// type is recreated.
pub fn ensureTypeUpToDate(pt: Zcu.PerThread, ty: InternPool.Index, already_updating: bool) Zcu.SemaError!InternPool.Index {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty)) {
        .struct_type => |key| {
            const struct_obj = ip.loadStructType(ty);
            const outdated = already_updating or o: {
                // Check (and consume) the outdated state of the type's owner `Cau`.
                const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? });
                const o = zcu.outdated.swapRemove(anal_unit) or
                    zcu.potentially_outdated.swapRemove(anal_unit);
                if (o) {
                    _ = zcu.outdated_ready.swapRemove(anal_unit);
                    // Dependees were waiting on this PO type; it has turned out
                    // to be outdated, so they must be re-analyzed.
                    try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
                }
                break :o o;
            };
            if (!outdated) return ty;
            return pt.recreateStructType(ty, key, struct_obj);
        },
        .union_type => |key| {
            const union_obj = ip.loadUnionType(ty);
            const outdated = already_updating or o: {
                // Same logic as for structs; note a union's `cau` is not optional.
                const anal_unit = AnalUnit.wrap(.{ .cau = union_obj.cau });
                const o = zcu.outdated.swapRemove(anal_unit) or
                    zcu.potentially_outdated.swapRemove(anal_unit);
                if (o) {
                    _ = zcu.outdated_ready.swapRemove(anal_unit);
                    try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
                }
                break :o o;
            };
            if (!outdated) return ty;
            return pt.recreateUnionType(ty, key, union_obj);
        },
        .enum_type => |key| {
            const enum_obj = ip.loadEnumType(ty);
            const outdated = already_updating or o: {
                const anal_unit = AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? });
                const o = zcu.outdated.swapRemove(anal_unit) or
                    zcu.potentially_outdated.swapRemove(anal_unit);
                if (o) {
                    _ = zcu.outdated_ready.swapRemove(anal_unit);
                    try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
                }
                break :o o;
            };
            if (!outdated) return ty;
            return pt.recreateEnumType(ty, key, enum_obj);
        },
        .opaque_type => {
            // Opaque types are never recreated here; callers must not claim one
            // is already being updated.
            assert(!already_updating);
            return ty;
        },
        else => unreachable, // not a container type requiring resolution
    }
}
/// Recreate an outdated `struct` type at a new `InternPool.Index`, reusing the
/// old type's name and namespace. Called by `ensureTypeUpToDate`.
/// The old index's `Cau` dependencies are dropped, since the old type becomes
/// unused. Returns `error.AnalysisFail` if the declaration's ZIR is unavailable
/// or if the declaration's shape changed (capture or field count mismatch), in
/// which case the type cannot be recreated here and use sites must re-analyze.
fn recreateStructType(
    pt: Zcu.PerThread,
    ty: InternPool.Index,
    full_key: InternPool.Key.NamespaceType,
    struct_obj: InternPool.LoadedStructType,
) Zcu.SemaError!InternPool.Index {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    const key = switch (full_key) {
        .reified => unreachable, // never outdated
        .empty_struct => unreachable, // never outdated
        .generated_tag => unreachable, // not a struct
        .declared => |d| d,
    };

    // Builtin types live at fixed low indices and cannot be replaced this way.
    if (@intFromEnum(ty) <= InternPool.static_len) {
        @panic("TODO: recreate resolved builtin type");
    }

    // If the tracked instruction no longer resolves, the declaration was
    // presumably lost in this update, so there is nothing to recreate.
    const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
    const file = zcu.fileByIndex(inst_info.file);
    if (file.status != .success_zir) return error.AnalysisFail;
    const zir = file.zir;

    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
    assert(extended.opcode == .struct_decl);
    const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);

    // Walk the `struct_decl` trailing data in declaration order; each `has_*`
    // flag contributes one optional `u32`.
    const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
    var extra_index = extra.end;
    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;
    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    // If the capture or field count changed, the old type's data cannot be
    // carried over; bail out so use sites are re-analyzed from scratch.
    if (captures_len != key.captures.owned.len) return error.AnalysisFail;
    if (fields_len != struct_obj.field_types.len) return error.AnalysisFail;

    // The old type will be unused, so drop its dependency information.
    ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? }));

    const namespace_index = struct_obj.namespace.unwrap().?;

    const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
        .layout = small.layout,
        .fields_len = fields_len,
        .known_non_opv = small.known_non_opv,
        .requires_comptime = if (small.known_comptime_only) .yes else .unknown,
        .is_tuple = small.is_tuple,
        .any_comptime_fields = small.any_comptime_fields,
        .any_default_inits = small.any_default_inits,
        .inits_resolved = false,
        .any_aligned_fields = small.any_aligned_fields,
        .key = .{ .declared_owned_captures = .{
            .zir_index = key.zir_index,
            .captures = key.captures.owned,
        } },
    }, true)) { // `true` here is `replace_existing`
        .wip => |wip| wip,
        .existing => unreachable, // we passed `replace_existing`
    };
    errdefer wip_ty.cancel(ip, pt.tid);

    // The new type keeps the old type's name.
    wip_ty.setName(ip, struct_obj.name);

    const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index);

    try ip.addDependency(
        gpa,
        AnalUnit.wrap(.{ .cau = new_cau_index }),
        .{ .src_hash = key.zir_index },
    );
    // Re-point the reused namespace at the replacement type.
    zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
    // No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive.

    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
    const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
    if (inst_info.inst == .main_struct_inst) {
        // This is the root type of a file! Update the reference.
        zcu.setFileRootType(inst_info.file, new_ty);
    }
    return new_ty;
}
/// Recreate an outdated `union` type at a new `InternPool.Index`, reusing the
/// old type's name and namespace. Called by `ensureTypeUpToDate`.
/// The old index's `Cau` dependencies are dropped, since the old type becomes
/// unused. Returns `error.AnalysisFail` if the declaration's ZIR is unavailable
/// or if the declaration's shape changed (capture or field count mismatch).
fn recreateUnionType(
    pt: Zcu.PerThread,
    ty: InternPool.Index,
    full_key: InternPool.Key.NamespaceType,
    union_obj: InternPool.LoadedUnionType,
) Zcu.SemaError!InternPool.Index {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;

    const key = switch (full_key) {
        .reified => unreachable, // never outdated
        .empty_struct => unreachable, // never outdated
        .generated_tag => unreachable, // not a union
        .declared => |d| d,
    };

    // Builtin types live at fixed low indices and cannot be replaced this way.
    if (@intFromEnum(ty) <= InternPool.static_len) {
        @panic("TODO: recreate resolved builtin type");
    }

    // If the tracked instruction no longer resolves, the declaration was
    // presumably lost in this update, so there is nothing to recreate.
    const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
    const file = zcu.fileByIndex(inst_info.file);
    if (file.status != .success_zir) return error.AnalysisFail;
    const zir = file.zir;

    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
    assert(extended.opcode == .union_decl);
    const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);

    // Walk the `union_decl` trailing data in declaration order; fields we do
    // not need (tag type, body length) are skipped but still advance the index.
    const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
    var extra_index = extra.end;
    extra_index += @intFromBool(small.has_tag_type);
    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;
    extra_index += @intFromBool(small.has_body_len);
    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;

    // If the capture or field count changed, the old type's data cannot be
    // carried over; bail out so use sites are re-analyzed from scratch.
    if (captures_len != key.captures.owned.len) return error.AnalysisFail;
    if (fields_len != union_obj.field_types.len) return error.AnalysisFail;

    // The old type will be unused, so drop its dependency information.
    ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = union_obj.cau }));

    const namespace_index = union_obj.namespace;

    const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{
        .flags = .{
            .layout = small.layout,
            .status = .none,
            .runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
                .tagged
            else if (small.layout != .auto)
                .none
            else switch (true) { // TODO
                true => .safety,
                false => .none,
            },
            .any_aligned_fields = small.any_aligned_fields,
            .requires_comptime = .unknown,
            .assumed_runtime_bits = false,
            .assumed_pointer_aligned = false,
            .alignment = .none,
        },
        .fields_len = fields_len,
        .enum_tag_ty = .none, // set later
        .field_types = &.{}, // set later
        .field_aligns = &.{}, // set later
        .key = .{ .declared_owned_captures = .{
            .zir_index = key.zir_index,
            .captures = key.captures.owned,
        } },
    }, true)) { // `true` here is `replace_existing`
        .wip => |wip| wip,
        .existing => unreachable, // we passed `replace_existing`
    };
    errdefer wip_ty.cancel(ip, pt.tid);

    // The new type keeps the old type's name.
    wip_ty.setName(ip, union_obj.name);

    const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index);

    try ip.addDependency(
        gpa,
        AnalUnit.wrap(.{ .cau = new_cau_index }),
        .{ .src_hash = key.zir_index },
    );
    // Re-point the reused namespace at the replacement type.
    zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
    // No need to re-scan the namespace -- `zirUnionDecl` will ultimately do that if the type is still alive.
    try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
    return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
}
/// Re-creates an outdated declared `enum` type during an incremental update.
/// The old type `ty` is abandoned (its dependency information is dropped) and a
/// fresh `InternPool.Index` is created from the current ZIR, replacing the
/// existing entry (`replace_existing` is passed to `getEnumType`).
/// Returns `error.AnalysisFail` if the declaration instruction has been lost,
/// the file's ZIR is not loaded successfully, or the new declaration's shape
/// (captures/fields count) no longer matches the loaded type.
fn recreateEnumType(
    pt: Zcu.PerThread,
    ty: InternPool.Index,
    full_key: InternPool.Key.NamespaceType,
    enum_obj: InternPool.LoadedEnumType,
) Zcu.SemaError!InternPool.Index {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const ip = &zcu.intern_pool;
    const key = switch (full_key) {
        .reified => unreachable, // never outdated
        .empty_struct => unreachable, // never outdated
        .generated_tag => unreachable, // never outdated
        .declared => |d| d,
    };
    if (@intFromEnum(ty) <= InternPool.static_len) {
        @panic("TODO: recreate resolved builtin type");
    }
    const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
    const file = zcu.fileByIndex(inst_info.file);
    if (file.status != .success_zir) return error.AnalysisFail;
    const zir = file.zir;
    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
    assert(extended.opcode == .enum_decl);
    const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
    const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
    var extra_index = extra.end;
    // Walk the optional trailing fields of `enum_decl` in declaration order.
    const tag_type_ref = if (small.has_tag_type) blk: {
        const tag_type_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
        extra_index += 1;
        break :blk tag_type_ref;
    } else .none;
    const captures_len = if (small.has_captures_len) blk: {
        const captures_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk captures_len;
    } else 0;
    const body_len = if (small.has_body_len) blk: {
        const body_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk body_len;
    } else 0;
    const fields_len = if (small.has_fields_len) blk: {
        const fields_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk fields_len;
    } else 0;
    const decls_len = if (small.has_decls_len) blk: {
        const decls_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk decls_len;
    } else 0;
    // If the shape changed, this type cannot be recreated in place.
    if (captures_len != key.captures.owned.len) return error.AnalysisFail;
    if (fields_len != enum_obj.names.len) return error.AnalysisFail;
    extra_index += captures_len;
    extra_index += decls_len;
    const body = zir.bodySlice(extra_index, body_len);
    extra_index += body.len;
    // One u32 "bag" of has-value bits per 32 fields; any nonzero bag means at
    // least one field has an explicit tag value.
    const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
    const body_end = extra_index;
    extra_index += bit_bags_count;
    const any_values = for (zir.extra[body_end..][0..bit_bags_count]) |bag| {
        if (bag != 0) break true;
    } else false;
    // The old type will be unused, so drop its dependency information.
    ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? }));
    const namespace_index = enum_obj.namespace;
    const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{
        .has_values = any_values,
        .tag_mode = if (small.nonexhaustive)
            .nonexhaustive
        else if (tag_type_ref == .none)
            .auto
        else
            .explicit,
        .fields_len = fields_len,
        .key = .{ .declared_owned_captures = .{
            .zir_index = key.zir_index,
            .captures = key.captures.owned,
        } },
    }, true)) {
        .wip => |wip| wip,
        .existing => unreachable, // we passed `replace_existing`
    };
    // `done` must start out `false`: the `errdefer` below is responsible for
    // cancelling the WIP type if an error occurs before `prepare` succeeds
    // (e.g. the `try ip.createTypeCau` call). With `true` it would never fire,
    // leaking a partially-initialized type into the intern pool.
    var done = false;
    errdefer if (!done) wip_ty.cancel(ip, pt.tid);
    wip_ty.setName(ip, enum_obj.name);
    const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index);
    zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
    // No need to re-scan the namespace -- `zirEnumDecl` will ultimately do that if the type is still alive.
    wip_ty.prepare(ip, new_cau_index, namespace_index);
    // After `prepare`, the type is live; errors from `resolveDeclaredEnum`
    // must not cancel it.
    done = true;
    Sema.resolveDeclaredEnum(
        pt,
        wip_ty,
        inst_info.inst,
        key.zir_index,
        namespace_index,
        enum_obj.name,
        new_cau_index,
        small,
        body,
        tag_type_ref,
        any_values,
        fields_len,
        zir,
        body_end,
    ) catch |err| switch (err) {
        error.GenericPoison => unreachable,
        error.ComptimeBreak => unreachable,
        error.ComptimeReturn => unreachable,
        error.AnalysisFail, error.OutOfMemory => |e| return e,
    };
    return wip_ty.index;
}
/// Given a namespace, re-scan its declarations from the type definition if they have not
/// yet been re-scanned on this update.
/// If the type declaration instruction has been lost, returns `error.AnalysisFail`.
/// This will effectively short-circuit the caller, which will be semantic analysis of a
/// guaranteed-unreferenced `AnalUnit`, to trigger a transitive analysis error.
pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) Zcu.SemaError!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const namespace = zcu.namespacePtr(namespace_index);
    // Already re-scanned on this update: nothing to do.
    if (namespace.generation == zcu.generation) return;
    const Container = enum { @"struct", @"union", @"enum", @"opaque" };
    const container: Container, const full_key = switch (ip.indexToKey(namespace.owner_type)) {
        .struct_type => |k| .{ .@"struct", k },
        .union_type => |k| .{ .@"union", k },
        .enum_type => |k| .{ .@"enum", k },
        .opaque_type => |k| .{ .@"opaque", k },
        else => unreachable, // namespaces are owned by a container type
    };
    const key = switch (full_key) {
        .reified, .empty_struct, .generated_tag => {
            // Namespace always empty, so up-to-date.
            namespace.generation = zcu.generation;
            return;
        },
        .declared => |d| d,
    };
    // Namespace outdated -- re-scan the type if necessary.
    const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
    const file = zcu.fileByIndex(inst_info.file);
    if (file.status != .success_zir) return error.AnalysisFail;
    const zir = file.zir;
    assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
    const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
    // Each arm below mirrors the trailing-data layout of the corresponding
    // `*_decl` extended instruction: skip over the optional leading fields
    // (reading only the lengths needed to advance) until `extra_index` points
    // at the decls body, then slice it out.
    const decls = switch (container) {
        .@"struct" => decls: {
            assert(extended.opcode == .struct_decl);
            const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
            const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
            var extra_index = extra.end;
            const captures_len = if (small.has_captures_len) blk: {
                const captures_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk captures_len;
            } else 0;
            extra_index += @intFromBool(small.has_fields_len);
            const decls_len = if (small.has_decls_len) blk: {
                const decls_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk decls_len;
            } else 0;
            extra_index += captures_len;
            // A packed struct's backing int is either a single ref (empty
            // body) or a body of `backing_int_body_len` instructions.
            if (small.has_backing_int) {
                const backing_int_body_len = zir.extra[extra_index];
                extra_index += 1; // backing_int_body_len
                if (backing_int_body_len == 0) {
                    extra_index += 1; // backing_int_ref
                } else {
                    extra_index += backing_int_body_len; // backing_int_body_inst
                }
            }
            break :decls zir.bodySlice(extra_index, decls_len);
        },
        .@"union" => decls: {
            assert(extended.opcode == .union_decl);
            const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
            const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
            var extra_index = extra.end;
            extra_index += @intFromBool(small.has_tag_type);
            const captures_len = if (small.has_captures_len) blk: {
                const captures_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk captures_len;
            } else 0;
            extra_index += @intFromBool(small.has_body_len);
            extra_index += @intFromBool(small.has_fields_len);
            const decls_len = if (small.has_decls_len) blk: {
                const decls_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk decls_len;
            } else 0;
            extra_index += captures_len;
            break :decls zir.bodySlice(extra_index, decls_len);
        },
        .@"enum" => decls: {
            assert(extended.opcode == .enum_decl);
            const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
            const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
            var extra_index = extra.end;
            extra_index += @intFromBool(small.has_tag_type);
            const captures_len = if (small.has_captures_len) blk: {
                const captures_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk captures_len;
            } else 0;
            extra_index += @intFromBool(small.has_body_len);
            extra_index += @intFromBool(small.has_fields_len);
            const decls_len = if (small.has_decls_len) blk: {
                const decls_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk decls_len;
            } else 0;
            extra_index += captures_len;
            break :decls zir.bodySlice(extra_index, decls_len);
        },
        .@"opaque" => decls: {
            assert(extended.opcode == .opaque_decl);
            const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
            const extra = zir.extraData(Zir.Inst.OpaqueDecl, extended.operand);
            var extra_index = extra.end;
            const captures_len = if (small.has_captures_len) blk: {
                const captures_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk captures_len;
            } else 0;
            const decls_len = if (small.has_decls_len) blk: {
                const decls_len = zir.extra[extra_index];
                extra_index += 1;
                break :blk decls_len;
            } else 0;
            extra_index += captures_len;
            break :decls zir.bodySlice(extra_index, decls_len);
        },
    };
    try pt.scanNamespace(namespace_index, decls);
    // Mark the namespace as current so repeat calls on this update are no-ops.
    namespace.generation = zcu.generation;
}
const Air = @import("../Air.zig");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -3290,6 +3869,7 @@ const builtin = @import("builtin");
const Cache = std.Build.Cache;
const dev = @import("../dev.zig");
const InternPool = @import("../InternPool.zig");
const AnalUnit = InternPool.AnalUnit;
const isUpDir = @import("../introspect.zig").isUpDir;
const Liveness = @import("../Liveness.zig");
const log = std.log.scoped(.zcu);
+1 -1
View File
@@ -98,7 +98,7 @@ pub fn generateLazyFunction(
debug_output: DebugInfoOutput,
) CodeGenError!Result {
const zcu = pt.zcu;
const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(&zcu.intern_pool).file;
const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(&zcu.intern_pool);
const target = zcu.fileByIndex(file).mod.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
+1 -1
View File
@@ -2585,7 +2585,7 @@ pub fn genTypeDecl(
const ty = Type.fromInterned(index);
_ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
try writer.writeByte(';');
const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file;
const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip);
if (!zcu.fileByIndex(file_scope).mod.strip) try writer.print(" /* {} */", .{
ty.containerTypeName(ip).fmt(ip),
});
+3 -3
View File
@@ -1959,7 +1959,7 @@ pub const Object = struct {
);
}
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip));
const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace|
try o.namespaceToDebugScope(parent_namespace)
else
@@ -2137,7 +2137,7 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip));
const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace|
try o.namespaceToDebugScope(parent_namespace)
else
@@ -2772,7 +2772,7 @@ pub const Object = struct {
fn makeEmptyNamespaceDebugType(o: *Object, ty: Type) !Builder.Metadata {
const zcu = o.pt.zcu;
const ip = &zcu.intern_pool;
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip));
const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace|
try o.namespaceToDebugScope(parent_namespace)
else
+14 -2
View File
@@ -78,7 +78,13 @@ fn dumpStatusReport() !void {
const block: *Sema.Block = anal.block;
const zcu = anal.sema.pt.zcu;
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu);
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse {
const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool));
try stderr.writeAll("Analyzing lost instruction in file '");
try writeFilePath(file, stderr);
try stderr.writeAll("'. This should not happen!\n\n");
return;
};
try stderr.writeAll("Analyzing ");
try writeFilePath(file, stderr);
@@ -104,7 +110,13 @@ fn dumpStatusReport() !void {
while (parent) |curr| {
fba.reset();
try stderr.writeAll(" in ");
const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu);
const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {
const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
try writeFilePath(cur_block_file, stderr);
try stderr.writeAll("\n > [lost instruction; this should not happen]\n");
parent = curr.parent;
continue;
};
try writeFilePath(cur_block_file, stderr);
try stderr.writeAll("\n > ");
print_zir.renderSingleInstruction(
+11 -11
View File
@@ -786,7 +786,7 @@ const Entry = struct {
const ip = &zcu.intern_pool;
for (dwarf.types.keys(), dwarf.types.values()) |ty, other_entry| {
const ty_unit: Unit.Index = if (Type.fromInterned(ty).typeDeclInst(zcu)) |inst_index|
dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod) catch unreachable
dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod) catch unreachable
else
.main;
if (sec.getUnit(ty_unit) == unit and unit.getEntry(other_entry) == entry)
@@ -796,7 +796,7 @@ const Entry = struct {
});
}
for (dwarf.navs.keys(), dwarf.navs.values()) |nav, other_entry| {
const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFull(ip).file).mod) catch unreachable;
const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFile(ip)).mod) catch unreachable;
if (sec.getUnit(nav_unit) == unit and unit.getEntry(other_entry) == entry)
log.err("missing Nav({}({d}))", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) });
}
@@ -1201,7 +1201,7 @@ pub const WipNav = struct {
const ip = &zcu.intern_pool;
const maybe_inst_index = ty.typeDeclInst(zcu);
const unit = if (maybe_inst_index) |inst_index|
try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod)
try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod)
else
.main;
const gop = try wip_nav.dwarf.types.getOrPut(wip_nav.dwarf.gpa, ty.toIntern());
@@ -1539,7 +1539,7 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In
const nav = ip.getNav(nav_index);
log.debug("initWipNav({})", .{nav.fqn.fmt(ip)});
const inst_info = nav.srcInst(ip).resolveFull(ip);
const inst_info = nav.srcInst(ip).resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
const unit = try dwarf.getUnit(file.mod);
@@ -1874,7 +1874,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
const nav = ip.getNav(nav_index);
log.debug("updateComptimeNav({})", .{nav.fqn.fmt(ip)});
const inst_info = nav.srcInst(ip).resolveFull(ip);
const inst_info = nav.srcInst(ip).resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
assert(file.zir_loaded);
const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
@@ -1937,7 +1937,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip);
const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_struct;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@@ -2053,7 +2053,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip);
const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_enum;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@@ -2127,7 +2127,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_union.zir_index.resolveFull(ip);
const type_inst_info = loaded_union.zir_index.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_union;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@@ -2240,7 +2240,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_opaque.zir_index.resolveFull(ip);
const type_inst_info = loaded_opaque.zir_index.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_opaque;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@@ -2704,7 +2704,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
const ty = Type.fromInterned(type_index);
log.debug("updateContainerType({}({d}))", .{ ty.fmt(pt), @intFromEnum(type_index) });
const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip);
const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
if (inst_info.inst == .main_struct_inst) {
const unit = try dwarf.getUnit(file.mod);
@@ -2922,7 +2922,7 @@ pub fn updateNavLineNumber(dwarf: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.I
const ip = &zcu.intern_pool;
const zir_index = ip.getCau(ip.getNav(nav_index).analysis_owner.unwrap() orelse return).zir_index;
const inst_info = zir_index.resolveFull(ip);
const inst_info = zir_index.resolveFull(ip).?;
assert(inst_info.inst != .main_struct_inst);
const file = zcu.fileByIndex(inst_info.file);
+3 -2
View File
@@ -3257,9 +3257,12 @@ fn buildOutputType(
else => false,
};
const incremental = opt_incremental orelse false;
const disable_lld_caching = !output_to_cache;
const cache_mode: Compilation.CacheMode = b: {
if (incremental) break :b .incremental;
if (disable_lld_caching) break :b .incremental;
if (!create_module.resolved_options.have_zcu) break :b .whole;
@@ -3272,8 +3275,6 @@ fn buildOutputType(
break :b .incremental;
};
const incremental = opt_incremental orelse false;
process.raiseFileDescriptorLimit();
var file_system_inputs: std.ArrayListUnmanaged(u8) = .{};
+59
View File
@@ -0,0 +1,59 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(foo);
}
const foo = "good morning\n";
#expect_stdout="good morning\n"
#update=add new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(foo);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good morning\n"
#update=reference new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(bar);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good evening\n"
#update=reference missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_error=ignored
#update=add missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
const qux = "good night\n";
#expect_stdout="good night\n"
#update=remove unused declarations
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(qux);
}
const qux = "good night\n";
#expect_stdout="good night\n"
+59
View File
@@ -0,0 +1,59 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().foo);
}
const foo = "good morning\n";
#expect_stdout="good morning\n"
#update=add new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().foo);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good morning\n"
#update=reference new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().bar);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good evening\n"
#update=reference missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_error=ignored
#update=add missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
const qux = "good night\n";
#expect_stdout="good night\n"
#update=remove unused declarations
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().qux);
}
const qux = "good night\n";
#expect_stdout="good night\n"
+38
View File
@@ -0,0 +1,38 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
pub fn main() void {}
comptime {
var array = [_:0]u8{ 1, 2, 3, 4 };
const src_slice: [:0]u8 = &array;
const slice = src_slice[2..6];
_ = slice;
}
comptime {
var array = [_:0]u8{ 1, 2, 3, 4 };
const slice = array[2..6];
_ = slice;
}
comptime {
var array = [_]u8{ 1, 2, 3, 4 };
const slice = array[2..5];
_ = slice;
}
comptime {
var array = [_:0]u8{ 1, 2, 3, 4 };
const slice = array[3..2];
_ = slice;
}
#expect_error=ignored
#update=delete and modify comptime decls
#file=main.zig
pub fn main() void {}
comptime {
const x: [*c]u8 = null;
var runtime_len: usize = undefined;
runtime_len = 0;
const y = x[0..runtime_len];
_ = y;
}
#expect_error=ignored
+38
View File
@@ -0,0 +1,38 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(a);
}
const a = "Hello, World!\n";
#expect_stdout="Hello, World!\n"
#update=introduce compile error
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(a);
}
const a = @compileError("bad a");
#expect_error=ignored
#update=remove error reference
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(b);
}
const a = @compileError("bad a");
const b = "Hi there!\n";
#expect_stdout="Hi there!\n"
#update=introduce and remove reference to error
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(a);
}
const a = "Back to a\n";
const b = @compileError("bad b");
#expect_stdout="Back to a\n"
+190 -17
View File
@@ -2,14 +2,55 @@ const std = @import("std");
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const usage = "usage: incr-check <zig binary path> <input file> [--zig-lib-dir lib] [--debug-zcu] [--emit none|bin|c] [--zig-cc-binary /path/to/zig]";
const EmitMode = enum {
none,
bin,
c,
};
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
const args = try std.process.argsAlloc(arena);
const zig_exe = args[1];
const input_file_name = args[2];
var opt_zig_exe: ?[]const u8 = null;
var opt_input_file_name: ?[]const u8 = null;
var opt_lib_dir: ?[]const u8 = null;
var opt_cc_zig: ?[]const u8 = null;
var emit: EmitMode = .bin;
var debug_zcu = false;
var arg_it = try std.process.argsWithAllocator(arena);
_ = arg_it.skip();
while (arg_it.next()) |arg| {
if (arg.len > 0 and arg[0] == '-') {
if (std.mem.eql(u8, arg, "--emit")) {
const emit_str = arg_it.next() orelse fatal("expected arg after '--emit'\n{s}", .{usage});
emit = std.meta.stringToEnum(EmitMode, emit_str) orelse
fatal("invalid emit mode '{s}'\n{s}", .{ emit_str, usage });
} else if (std.mem.eql(u8, arg, "--zig-lib-dir")) {
opt_lib_dir = arg_it.next() orelse fatal("expected arg after '--zig-lib-dir'\n{s}", .{usage});
} else if (std.mem.eql(u8, arg, "--debug-zcu")) {
debug_zcu = true;
} else if (std.mem.eql(u8, arg, "--zig-cc-binary")) {
opt_cc_zig = arg_it.next() orelse fatal("expect arg after '--zig-cc-binary'\n{s}", .{usage});
} else {
fatal("unknown option '{s}'\n{s}", .{ arg, usage });
}
continue;
}
if (opt_zig_exe == null) {
opt_zig_exe = arg;
} else if (opt_input_file_name == null) {
opt_input_file_name = arg;
} else {
fatal("unknown argument '{s}'\n{s}", .{ arg, usage });
}
}
const zig_exe = opt_zig_exe orelse fatal("missing path to zig\n{s}", .{usage});
const input_file_name = opt_input_file_name orelse fatal("missing input file\n{s}", .{usage});
const input_file_bytes = try std.fs.cwd().readFileAlloc(arena, input_file_name, std.math.maxInt(u32));
const case = try Case.parse(arena, input_file_bytes);
@@ -24,13 +65,18 @@ pub fn main() !void {
const child_prog_node = prog_node.start("zig build-exe", 0);
defer child_prog_node.end();
var child = std.process.Child.init(&.{
// Convert incr-check-relative path to subprocess-relative path.
try std.fs.path.relative(arena, tmp_dir_path, zig_exe),
// Convert paths to be relative to the cwd of the subprocess.
const resolved_zig_exe = try std.fs.path.relative(arena, tmp_dir_path, zig_exe);
const opt_resolved_lib_dir = if (opt_lib_dir) |lib_dir|
try std.fs.path.relative(arena, tmp_dir_path, lib_dir)
else
null;
var child_args: std.ArrayListUnmanaged([]const u8) = .{};
try child_args.appendSlice(arena, &.{
resolved_zig_exe,
"build-exe",
case.root_source_file,
"-fno-llvm",
"-fno-lld",
"-fincremental",
"-target",
case.target_query,
@@ -39,8 +85,20 @@ pub fn main() !void {
"--global-cache-dir",
".global_cache",
"--listen=-",
}, arena);
});
if (opt_resolved_lib_dir) |resolved_lib_dir| {
try child_args.appendSlice(arena, &.{ "--zig-lib-dir", resolved_lib_dir });
}
switch (emit) {
.bin => try child_args.appendSlice(arena, &.{ "-fno-llvm", "-fno-lld" }),
.none => try child_args.append(arena, "-fno-emit-bin"),
.c => try child_args.appendSlice(arena, &.{ "-ofmt=c", "-lc" }),
}
if (debug_zcu) {
try child_args.appendSlice(arena, &.{ "--debug-log", "zcu" });
}
var child = std.process.Child.init(child_args.items, arena);
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
@@ -48,12 +106,33 @@ pub fn main() !void {
child.cwd_dir = tmp_dir;
child.cwd = tmp_dir_path;
var cc_child_args: std.ArrayListUnmanaged([]const u8) = .{};
if (emit == .c) {
const resolved_cc_zig_exe = if (opt_cc_zig) |cc_zig_exe|
try std.fs.path.relative(arena, tmp_dir_path, cc_zig_exe)
else
resolved_zig_exe;
try cc_child_args.appendSlice(arena, &.{
resolved_cc_zig_exe,
"cc",
"-target",
case.target_query,
"-I",
opt_resolved_lib_dir orelse fatal("'--zig-lib-dir' required when using '--emit c'", .{}),
"-o",
});
}
var eval: Eval = .{
.arena = arena,
.case = case,
.tmp_dir = tmp_dir,
.tmp_dir_path = tmp_dir_path,
.child = &child,
.allow_stderr = debug_zcu,
.emit = emit,
.cc_child_args = &cc_child_args,
};
try child.spawn();
@@ -65,9 +144,16 @@ pub fn main() !void {
defer poller.deinit();
for (case.updates) |update| {
var update_node = prog_node.start(update.name, 0);
defer update_node.end();
if (debug_zcu) {
std.log.info("=== START UPDATE '{s}' ===", .{update.name});
}
eval.write(update);
try eval.requestUpdate();
try eval.check(&poller, update);
try eval.check(&poller, update, update_node);
}
try eval.end(&poller);
@@ -81,6 +167,11 @@ const Eval = struct {
tmp_dir: std.fs.Dir,
tmp_dir_path: []const u8,
child: *std.process.Child,
allow_stderr: bool,
emit: EmitMode,
/// When `emit == .c`, this contains the first few arguments to `zig cc` to build the generated binary.
/// The arguments `out.c in.c` must be appended before spawning the subprocess.
cc_child_args: *std.ArrayListUnmanaged([]const u8),
const StreamEnum = enum { stdout, stderr };
const Poller = std.io.Poller(StreamEnum);
@@ -102,7 +193,7 @@ const Eval = struct {
}
}
fn check(eval: *Eval, poller: *Poller, update: Case.Update) !void {
fn check(eval: *Eval, poller: *Poller, update: Case.Update, prog_node: std.Progress.Node) !void {
const arena = eval.arena;
const Header = std.zig.Server.Message.Header;
const stdout = poller.fifo(.stdout);
@@ -136,9 +227,18 @@ const Eval = struct {
};
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data});
if (eval.allow_stderr) {
std.log.info("error_bundle included stderr:\n{s}", .{stderr_data});
} else {
fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data});
}
}
if (result_error_bundle.errorMessageCount() == 0) {
// Empty bundle indicates successful update in a `-fno-emit-bin` build.
try eval.checkSuccessOutcome(update, null, prog_node);
} else {
try eval.checkErrorOutcome(update, result_error_bundle);
}
try eval.checkErrorOutcome(update, result_error_bundle);
// This message indicates the end of the update.
stdout.discard(body.len);
return;
@@ -150,9 +250,13 @@ const Eval = struct {
const result_binary = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
fatal("emit_bin_path included unexpected stderr:\n{s}", .{stderr_data});
if (eval.allow_stderr) {
std.log.info("emit_bin_path included stderr:\n{s}", .{stderr_data});
} else {
fatal("emit_bin_path included unexpected stderr:\n{s}", .{stderr_data});
}
}
try eval.checkSuccessOutcome(update, result_binary);
try eval.checkSuccessOutcome(update, result_binary, prog_node);
// This message indicates the end of the update.
stdout.discard(body.len);
return;
@@ -166,7 +270,11 @@ const Eval = struct {
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
if (eval.allow_stderr) {
std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr_data });
} else {
fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
}
}
waitChild(eval.child);
@@ -191,12 +299,28 @@ const Eval = struct {
}
}
fn checkSuccessOutcome(eval: *Eval, update: Case.Update, binary_path: []const u8) !void {
fn checkSuccessOutcome(eval: *Eval, update: Case.Update, opt_emitted_path: ?[]const u8, prog_node: std.Progress.Node) !void {
switch (update.outcome) {
.unknown => return,
.compile_errors => fatal("expected compile errors but compilation incorrectly succeeded", .{}),
.stdout, .exit_code => {},
}
const emitted_path = opt_emitted_path orelse {
std.debug.assert(eval.emit == .none);
return;
};
const binary_path = switch (eval.emit) {
.none => unreachable,
.bin => emitted_path,
.c => bin: {
const rand_int = std.crypto.random.int(u64);
const out_bin_name = "./out_" ++ std.fmt.hex(rand_int);
try eval.buildCOutput(update, emitted_path, out_bin_name, prog_node);
break :bin out_bin_name;
},
};
const result = std.process.Child.run(.{
.allocator = eval.arena,
.argv = &.{binary_path},
@@ -266,6 +390,50 @@ const Eval = struct {
fatal("unexpected stderr:\n{s}", .{stderr_data});
}
}
fn buildCOutput(eval: *Eval, update: Case.Update, c_path: []const u8, out_path: []const u8, prog_node: std.Progress.Node) !void {
std.debug.assert(eval.cc_child_args.items.len > 0);
const child_prog_node = prog_node.start("build cbe output", 0);
defer child_prog_node.end();
try eval.cc_child_args.appendSlice(eval.arena, &.{ out_path, c_path });
defer eval.cc_child_args.items.len -= 2;
const result = std.process.Child.run(.{
.allocator = eval.arena,
.argv = eval.cc_child_args.items,
.cwd_dir = eval.tmp_dir,
.cwd = eval.tmp_dir_path,
.progress_node = child_prog_node,
}) catch |err| {
fatal("update '{s}': failed to spawn zig cc for '{s}': {s}", .{
update.name, c_path, @errorName(err),
});
};
switch (result.term) {
.Exited => |code| if (code != 0) {
if (result.stderr.len != 0) {
std.log.err("update '{s}': zig cc stderr:\n{s}", .{
update.name, result.stderr,
});
}
fatal("update '{s}': zig cc for '{s}' failed with code {d}", .{
update.name, c_path, code,
});
},
.Signal, .Stopped, .Unknown => {
if (result.stderr.len != 0) {
std.log.err("update '{s}': zig cc stderr:\n{s}", .{
update.name, result.stderr,
});
}
fatal("update '{s}': zig cc for '{s}' terminated unexpectedly", .{
update.name, c_path,
});
},
}
}
};
const Case = struct {
@@ -357,6 +525,11 @@ const Case = struct {
fatal("line {d}: bad string literal: {s}", .{ line_n, @errorName(err) });
},
};
} else if (std.mem.eql(u8, key, "expect_error")) {
if (updates.items.len == 0) fatal("line {d}: expect directive before update", .{line_n});
const last_update = &updates.items[updates.items.len - 1];
if (last_update.outcome != .unknown) fatal("line {d}: conflicting expect directive", .{line_n});
last_update.outcome = .{ .compile_errors = &.{} };
} else {
fatal("line {d}: unrecognized key '{s}'", .{ line_n, key });
}