//! Zig Compilation Unit
//!
//! Compilation of all Zig source code is represented by one `Zcu`.
//!
//! Each `Compilation` has either exactly one `Zcu` or none, depending on
//! whether there is any Zig source code.
const Zcu = @This();
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const Writer = std.Io.Writer;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.zcu);
const deps_log = std.log.scoped(.zcu_deps);
const refs_log = std.log.scoped(.zcu_refs);
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
const Ast = std.zig.Ast;
const Compilation = @import("Compilation.zig");
const Cache = std.Build.Cache;
pub const Value = @import("Value.zig");
pub const Type = @import("Type.zig");
const Package = @import("Package.zig");
const link = @import("link.zig");
const Air = @import("Air.zig");
const Zir = std.zig.Zir;
const trace = @import("tracy.zig").trace;
const AstGen = std.zig.AstGen;
const Sema = @import("Sema.zig");
const target_util = @import("target.zig");
const build_options = @import("build_options");
const isUpDir = @import("introspect.zig").isUpDir;
const InternPool = @import("InternPool.zig");
const Alignment = InternPool.Alignment;
const AnalUnit = InternPool.AnalUnit;
const BuiltinFn = std.zig.BuiltinFn;
const codegen = @import("codegen.zig");
const LlvmObject = @import("codegen/llvm.zig").Object;
const dev = @import("dev.zig");
const Zoir = std.zig.Zoir;
const ZonGen = std.zig.ZonGen;
comptime {
@setEvalBranchQuota(4000);
for (
@typeInfo(Zir.Inst.Ref).@"enum".fields,
@typeInfo(Air.Inst.Ref).@"enum".fields,
@typeInfo(InternPool.Index).@"enum".fields,
) |zir_field, air_field, ip_field| {
assert(mem.eql(u8, zir_field.name, ip_field.name));
assert(mem.eql(u8, air_field.name, ip_field.name));
}
}
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: Allocator,
comp: *Compilation,
/// If the ZCU is emitting an LLVM object (i.e. we are using the LLVM backend), then this is the
/// `LlvmObject` we are emitting to.
llvm_object: ?LlvmObject.Ptr,
/// Pointer to externally managed resource.
root_mod: *Package.Module,
/// Normally, `main_mod` and `root_mod` are the same. The exception is `zig test`, in which
/// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests.
main_mod: *Package.Module,
std_mod: *Package.Module,
sema_prog_node: std.Progress.Node = .none,
codegen_prog_node: std.Progress.Node = .none,
/// The number of codegen jobs which are pending or in-progress. Whichever thread drops this value
/// to 0 is responsible for ending `codegen_prog_node`. While semantic analysis is happening, this
/// value bottoms out at 1 instead of 0, to ensure that it can only drop to 0 after analysis is
/// completed (since semantic analysis could trigger more codegen work).
pending_codegen_jobs: std.atomic.Value(u32) = .init(0),
/// This is the progress node *under* `sema_prog_node` which is currently running.
/// When we have to pause to analyze something else, we just temporarily rename this node.
/// Eventually, when we thread semantic analysis, we will want one of these per thread.
cur_sema_prog_node: std.Progress.Node = .none,
/// Used by AstGen worker to load and store ZIR cache.
global_zir_cache: Cache.Directory,
/// Used by AstGen worker to load and store ZIR cache.
local_zir_cache: Cache.Directory,
/// This is where all `Export` values are stored. Not all values here are necessarily valid exports;
/// to enumerate all exports, `single_exports` and `multi_exports` must be consulted.
all_exports: std.ArrayList(Export) = .empty,
/// This is a list of free indices in `all_exports`. These indices may be reused by exports from
/// future semantic analysis.
free_exports: std.ArrayList(Export.Index) = .empty,
/// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of
/// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit`
/// whose analysis triggered the export.
single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, Export.Index) = .empty,
/// Like `single_exports`, but for `AnalUnit`s which perform multiple exports.
/// The exports are `all_exports.items[index..][0..len]`.
multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
index: u32,
len: u32,
}) = .{},
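// Illustrative sketch (assumes `zcu: *Zcu`, a `unit: AnalUnit`, and a hypothetical
// consumer `useExport`; not part of this file's API): enumerating every export
// performed by a single `AnalUnit` via `single_exports` and `multi_exports`.
//
//     if (zcu.single_exports.get(unit)) |export_idx| {
//         useExport(export_idx.ptr(zcu));
//     } else if (zcu.multi_exports.get(unit)) |span| {
//         for (zcu.all_exports.items[span.index..][0..span.len]) |*exp| useExport(exp);
//     }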
/// Key is the digest returned by `Builtin.hash`; value is the corresponding module.
builtin_modules: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, *Package.Module) = .empty,
/// Populated as soon as the `Compilation` is created. Guaranteed to contain all modules, even builtin ones.
/// Modules whose root file is not a Zig or ZON file have the value `.none`.
module_roots: std.AutoArrayHashMapUnmanaged(*Package.Module, File.Index.Optional) = .empty,
/// The set of all the Zig source files in the Zig Compilation Unit. Tracked in
/// order to iterate over it and check which source files have been modified on
/// the file system when an update is requested, as well as to cache `@import`
/// results.
///
/// Always accessed through `ImportTableAdapter`, where keys are fully resolved
/// file paths in order to ensure files are properly deduplicated. This table owns
/// the keys and values.
///
/// Protected by Compilation's mutex.
///
/// Not serialized. This state is reconstructed during the first call to
/// `Compilation.update` of the process for a given `Compilation`.
import_table: std.ArrayHashMapUnmanaged(
File.Index,
void,
struct {
pub const hash = @compileError("all accesses should be through ImportTableAdapter");
pub const eql = @compileError("all accesses should be through ImportTableAdapter");
},
true, // This is necessary! Without it, the map tries to use its Context to rehash. #21918
) = .empty,
/// The set of all files in `import_table` which are "alive" this update, meaning
/// they are reachable by traversing imports starting from an analysis root. This
/// is usually all files in `import_table`, but some could be omitted if an incremental
/// update removes an import, or if a module specified on the CLI is never imported.
/// Reconstructed on every update, after AstGen and before Sema.
/// Value is why the file is alive.
alive_files: std.AutoArrayHashMapUnmanaged(File.Index, File.Reference) = .empty,
/// If this is populated, a "file exists in multiple modules" error should be emitted.
/// This causes file errors to not be shown, because we don't really know which files
/// should be alive (because the user has messed up their imports somewhere!).
/// Cleared and recomputed every update, after AstGen and before Sema.
multi_module_err: ?struct {
file: File.Index,
modules: [2]*Package.Module,
refs: [2]File.Reference,
} = null,
/// The set of all the files which have been loaded with `@embedFile` in the Module.
/// We keep track of this in order to iterate over it and check which files have been
/// modified on the file system when an update is requested, as well as to cache
/// `@embedFile` results.
///
/// Like `import_table`, this is accessed through `EmbedTableAdapter`, so that it is keyed
/// on the `Compilation.Path` of the `EmbedFile`.
///
/// This table owns all of the `*EmbedFile` memory, which is allocated into gpa.
embed_table: std.ArrayHashMapUnmanaged(
*EmbedFile,
void,
struct {
pub const hash = @compileError("all accesses should be through EmbedTableAdapter");
pub const eql = @compileError("all accesses should be through EmbedTableAdapter");
},
true, // This is necessary! Without it, the map tries to use its Context to rehash. #21918
) = .empty,
/// Stores all Type and Value objects.
/// The idea is that this will be periodically garbage-collected, but such logic
/// is not yet implemented.
intern_pool: InternPool = .empty,
/// Value explains why this `AnalUnit` is being analyzed. It is `null` for the topmost analysis
/// (index 0), and non-`null` for all others.
analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, ?*const DependencyReason) = .empty,
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .empty,
/// This `AnalUnit` failed semantic analysis because it required analysis of another `AnalUnit` which itself failed.
transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// This `Nav` succeeded analysis, but failed codegen.
/// This may be a simple "value" `Nav`, or it may be a function.
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
/// While multiple threads are active (most of the time!), this is guarded by `zcu.comp.mutex`, as
/// codegen and linking run on a separate thread.
failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty,
failed_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, *ErrorMsg) = .empty,
/// Key is an `AnalUnit` which is in `dependency_loop_nodes`. For each dependency loop, exactly one
/// unit in the loop is in this map, though the choice is arbitrary and not necessarily reproducible
/// between compilations. So, instead of (for instance) defining where the dependency loop "starts",
/// this map simply exists to allow easily iterating all dependency loops exactly once.
dependency_loops: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// Key is an `AnalUnit`, value is the `AnalUnit` which the key references and why it does so.
/// All units in here form loops. To iterate loops, see `dependency_loops`.
dependency_loop_nodes: std.AutoArrayHashMapUnmanaged(AnalUnit, struct {
unit: AnalUnit,
reason: DependencyReason,
}) = .empty,
/// Keep track of `@compileLog`s per `AnalUnit`.
/// We track the source location of the first `@compileLog` call, and all logged lines as a linked list.
/// The list is singly linked, but we do track its tail for fast appends (optimizing many logs in one unit).
compile_logs: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
base_node_inst: InternPool.TrackedInst.Index,
node_offset: Ast.Node.Offset,
first_line: CompileLogLine.Index,
last_line: CompileLogLine.Index,
pub fn src(self: @This()) LazySrcLoc {
return .{
.base_node_inst = self.base_node_inst,
.offset = LazySrcLoc.Offset.nodeOffset(self.node_offset),
};
}
}) = .empty,
compile_log_lines: std.ArrayList(CompileLogLine) = .empty,
free_compile_log_lines: std.ArrayList(CompileLogLine.Index) = .empty,
/// This tracks files which triggered errors when generating AST/ZIR/ZOIR.
/// If not `null`, the value is a retryable error (the file status is guaranteed
/// to be `.retryable_failure`). Otherwise, the file status is `.astgen_failure`
/// or `.success`, and there are ZIR/ZOIR errors which should be printed.
/// We just store a `[]u8` instead of a full `*ErrorMsg`, because the source
/// location is always the entire file. The `[]u8` memory is owned by the map
/// and allocated into `gpa`.
failed_files: std.AutoArrayHashMapUnmanaged(File.Index, ?[]u8) = .empty,
/// AstGen is not aware of modules, and so cannot determine whether an import
/// string makes sense. That is the job of a traversal after AstGen.
///
/// There are several ways in which an import can fail:
///
/// * It is an import of a file which does not exist. This case is not handled
/// by this field, but with a `failed_files` entry on the *imported* file.
/// * It is an import of a module which does not exist in the current module's
/// dependency table. This happens at `Sema` time, so is not tracked by this
/// field.
/// * It is an import which reaches outside of the current module's root
/// directory. This is tracked by this field.
/// * It is an import which reaches into an "illegal import directory". Right now,
/// the only such directory is 'global_cache/b/', but in general, these are
/// directories the compiler treats specially. This is tracked by this field.
///
/// This is a flat array containing all of the relevant errors. It is cleared and
/// recomputed on every update. The errors here are fatal, i.e. they block any
/// semantic analysis this update.
///
/// Allocated into gpa.
failed_imports: std.ArrayList(struct {
file_index: File.Index,
import_string: Zir.NullTerminatedString,
import_token: Ast.TokenIndex,
kind: enum { file_outside_module_root, illegal_zig_import },
}) = .empty,
failed_exports: std.AutoArrayHashMapUnmanaged(Export.Index, *ErrorMsg) = .empty,
/// If analysis failed due to a cimport error, the corresponding Clang errors
/// are stored here.
cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .empty,
/// Maximum amount of distinct error values, set by --error-limit
error_limit: ErrorInt,
/// In safe builds, `Type.assertHasLayout` may be called cross-thread, so this lock
/// guards accesses to `outdated` and `potentially_outdated`. In unsafe builds, the
/// lock is not needed and is compiled out.
outdated_lock: if (std.debug.runtime_safety) std.Io.RwLock else void = if (std.debug.runtime_safety) .init,
/// Value is the number of PO dependencies of this AnalUnit.
/// This value will decrease as we perform semantic analysis to learn what is outdated.
/// If any of these PO deps is outdated, this value will be moved to `outdated`.
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
/// Value is the number of PO dependencies of this AnalUnit.
/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
/// This is the set of all `AnalUnit`s in `outdated` whose PO dependency count is 0.
/// Such `AnalUnit`s are ready for immediate re-analysis.
/// See `findOutdatedToAnalyze` for details.
outdated_ready: struct {
/// These are separate from other units because it allows `findOutdatedToAnalyze` to prioritize
/// functions, which is useful because it means they will be sent to codegen more quickly.
funcs: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
/// Does not contain `.func` units.
other: std.AutoArrayHashMapUnmanaged(AnalUnit, void),
} = .{ .funcs = .empty, .other = .empty },
/// This contains a list of AnalUnit whose analysis or codegen failed, but the
/// failure was something like running out of disk space, and trying again may
/// succeed. On the next update, we will flush this list, marking all members of
/// it as outdated.
retryable_failures: std.ArrayList(AnalUnit) = .empty,
/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
analysis_roots_buffer: [5]*Package.Module,
analysis_roots_len: usize = 0,
/// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
/// reset to `null` when any semantic analysis occurs (since this invalidates the data).
/// Allocated into `gpa`.
resolved_references: ?std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = null,
/// If `true`, then semantic analysis must not occur on this update due to AstGen errors.
/// Essentially the entire pipeline after AstGen, including Sema, codegen, and link, is skipped.
/// Reset to `false` at the start of each update in `Compilation.update`.
skip_analysis_this_update: bool = false,
test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty,
global_assembly: std.AutoArrayHashMapUnmanaged(AnalUnit, []u8) = .empty,
/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_references` of the first reference triggered by the unit.
/// The `next` field on the `Reference` forms a linked list of all references
/// triggered by the key `AnalUnit`.
reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
all_references: std.ArrayList(Reference) = .empty,
/// Freelist of indices in `all_references`.
free_references: std.ArrayList(u32) = .empty,
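// Illustrative sketch (assumes `zcu: *Zcu` and a `unit: AnalUnit` with at least one
// reference; not part of this file's API): walking the linked list of references
// triggered by one unit, following `next` until the `maxInt(u32)` sentinel.
//
//     var idx = zcu.reference_table.get(unit).?;
//     while (idx != std.math.maxInt(u32)) {
//         const ref = zcu.all_references.items[idx];
//         // ... inspect `ref.referenced` and `ref.src` here ...
//         idx = ref.next;
//     }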
inline_reference_frames: std.ArrayList(InlineReferenceFrame) = .empty,
free_inline_reference_frames: std.ArrayList(InlineReferenceFrame.Index) = .empty,
/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_type_reference` of the first reference triggered by the unit.
/// The `next` field on the `TypeReference` forms a linked list of all type references
/// triggered by the key `AnalUnit`.
type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
all_type_references: std.ArrayList(TypeReference) = .empty,
/// Freelist of indices in `all_type_references`.
free_type_references: std.ArrayList(u32) = .empty,
/// Populated by analysis of `AnalUnit.wrap(.{ .memoized_state = s })`, where `s` depends on the element.
builtin_decl_values: BuiltinDecl.Memoized = .initFill(.none),
incremental_debug_state: if (build_options.enable_debug_extensions) IncrementalDebugState else void =
if (build_options.enable_debug_extensions) .init else {},
/// Times semantic analysis of the current `AnalUnit`. When we pause to analyze a different unit,
/// this timer must be temporarily paused and resumed later.
cur_analysis_timer: ?Compilation.Timer = null,
codegen_task_pool: CodegenTaskPool,
generation: u32 = 0,
pub const DependencyReason = struct {
src: LazySrcLoc,
/// Only populated if this is for a `.type_layout` unit.
type_layout_reason: Sema.type_resolution.LayoutResolveReason,
};
pub const IncrementalDebugState = struct {
/// All container types in the ZCU, even dead ones.
/// Value is the generation the type was created on.
types: std.AutoArrayHashMapUnmanaged(InternPool.Index, u32),
/// All `Nav`s in the ZCU, even dead ones.
/// Value is the generation the `Nav` was created on.
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, u32),
/// All `AnalUnit`s in the ZCU, even dead ones.
units: std.AutoArrayHashMapUnmanaged(AnalUnit, UnitInfo),
pub const init: IncrementalDebugState = .{
.types = .empty,
.navs = .empty,
.units = .empty,
};
pub fn deinit(ids: *IncrementalDebugState, gpa: Allocator) void {
for (ids.units.values()) |*unit_info| {
unit_info.deps.deinit(gpa);
}
ids.types.deinit(gpa);
ids.navs.deinit(gpa);
ids.units.deinit(gpa);
}
pub const UnitInfo = struct {
last_update_gen: u32,
/// This information isn't easily recoverable from `InternPool`'s dependency storage format.
deps: std.ArrayList(InternPool.Dependee),
};
pub fn getUnitInfo(ids: *IncrementalDebugState, gpa: Allocator, unit: AnalUnit) Allocator.Error!*UnitInfo {
const gop = try ids.units.getOrPut(gpa, unit);
if (!gop.found_existing) gop.value_ptr.* = .{
.last_update_gen = std.math.maxInt(u32),
.deps = .empty,
};
return gop.value_ptr;
}
pub fn newType(ids: *IncrementalDebugState, zcu: *Zcu, ty: InternPool.Index) Allocator.Error!void {
try ids.types.putNoClobber(zcu.gpa, ty, zcu.generation);
}
pub fn newNav(ids: *IncrementalDebugState, zcu: *Zcu, nav: InternPool.Nav.Index) Allocator.Error!void {
try ids.navs.putNoClobber(zcu.gpa, nav, zcu.generation);
}
};
pub const PerThread = @import("Zcu/PerThread.zig");
pub const ImportTableAdapter = struct {
zcu: *const Zcu,
pub fn hash(ctx: ImportTableAdapter, path: Compilation.Path) u32 {
_ = ctx;
return @truncate(std.hash.Wyhash.hash(@intFromEnum(path.root), path.sub_path));
}
pub fn eql(ctx: ImportTableAdapter, a_path: Compilation.Path, b_file: File.Index, b_index: usize) bool {
_ = b_index;
const b_path = ctx.zcu.fileByIndex(b_file).path;
return a_path.root == b_path.root and mem.eql(u8, a_path.sub_path, b_path.sub_path);
}
};
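// Illustrative sketch (assumes `zcu: *Zcu` and a canonical `path: Compilation.Path`;
// not part of this file's API): deduplicated lookup of a file through the adapter.
//
//     const adapter: ImportTableAdapter = .{ .zcu = zcu };
//     if (zcu.import_table.getKeyAdapted(path, adapter)) |file_index| {
//         const file = zcu.fileByIndex(file_index);
//         _ = file;
//     }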
pub const EmbedTableAdapter = struct {
pub fn hash(ctx: EmbedTableAdapter, path: Compilation.Path) u32 {
_ = ctx;
return @truncate(std.hash.Wyhash.hash(@intFromEnum(path.root), path.sub_path));
}
pub fn eql(ctx: EmbedTableAdapter, a_path: Compilation.Path, b_file: *EmbedFile, b_index: usize) bool {
_ = ctx;
_ = b_index;
const b_path = b_file.path;
return a_path.root == b_path.root and mem.eql(u8, a_path.sub_path, b_path.sub_path);
}
};
/// Names of declarations in `std.builtin` whose values are memoized in a `BuiltinDecl.Memoized`.
/// The name must exactly match the declaration name, as comptime logic is used to compute the namespace accesses.
/// Parent namespaces must be before their children in this enum. For instance, `.Type` must be before `.@"Type.Fn"`.
/// Additionally, parent namespaces must be resolved in the same stage as their children; see `BuiltinDecl.stage`.
pub const BuiltinDecl = enum {
Signedness,
AddressSpace,
CallingConvention,
returnError,
StackTrace,
SourceLocation,
CallModifier,
AtomicOrder,
AtomicRmwOp,
ReduceOp,
FloatMode,
PrefetchOptions,
ExportOptions,
ExternOptions,
BranchHint,
Type,
@"Type.Fn",
@"Type.Fn.Param",
@"Type.Fn.Param.Attributes",
@"Type.Fn.Attributes",
@"Type.Int",
@"Type.Float",
@"Type.Pointer",
@"Type.Pointer.Size",
@"Type.Pointer.Attributes",
@"Type.Array",
@"Type.Vector",
@"Type.Optional",
@"Type.Error",
@"Type.ErrorUnion",
@"Type.EnumField",
@"Type.Enum",
@"Type.Enum.Mode",
@"Type.Union",
@"Type.UnionField",
@"Type.UnionField.Attributes",
@"Type.Struct",
@"Type.StructField",
@"Type.StructField.Attributes",
@"Type.ContainerLayout",
@"Type.Opaque",
@"Type.Declaration",
panic,
@"panic.call",
@"panic.sentinelMismatch",
@"panic.unwrapError",
@"panic.outOfBounds",
@"panic.startGreaterThanEnd",
@"panic.inactiveUnionField",
@"panic.sliceCastLenRemainder",
@"panic.reachedUnreachable",
@"panic.unwrapNull",
@"panic.castToNull",
@"panic.incorrectAlignment",
@"panic.invalidErrorCode",
@"panic.integerOutOfBounds",
@"panic.integerOverflow",
@"panic.shlOverflow",
@"panic.shrOverflow",
@"panic.divideByZero",
@"panic.exactDivisionRemainder",
@"panic.integerPartOutOfBounds",
@"panic.corruptSwitch",
@"panic.shiftRhsTooBig",
@"panic.invalidEnumValue",
@"panic.forLenMismatch",
@"panic.copyLenMismatch",
@"panic.memcpyAlias",
@"panic.noreturnReturned",
VaList,
assembly,
@"assembly.Clobbers",
/// Determines what kind of validation will be done to the decl's value.
pub fn kind(decl: BuiltinDecl) enum { type, func, string } {
return switch (decl) {
.returnError => .func,
.StackTrace,
.CallingConvention,
.SourceLocation,
.Signedness,
.AddressSpace,
.VaList,
.CallModifier,
.AtomicOrder,
.AtomicRmwOp,
.ReduceOp,
.FloatMode,
.PrefetchOptions,
.ExportOptions,
.ExternOptions,
.BranchHint,
.assembly,
.@"assembly.Clobbers",
=> .type,
.Type,
.@"Type.Fn",
.@"Type.Fn.Param",
.@"Type.Fn.Param.Attributes",
.@"Type.Fn.Attributes",
.@"Type.Int",
.@"Type.Float",
.@"Type.Pointer",
.@"Type.Pointer.Size",
.@"Type.Pointer.Attributes",
.@"Type.Array",
.@"Type.Vector",
.@"Type.Optional",
.@"Type.Error",
.@"Type.ErrorUnion",
.@"Type.EnumField",
.@"Type.Enum",
.@"Type.Enum.Mode",
.@"Type.Union",
.@"Type.UnionField",
.@"Type.UnionField.Attributes",
.@"Type.Struct",
.@"Type.StructField",
.@"Type.StructField.Attributes",
.@"Type.ContainerLayout",
.@"Type.Opaque",
.@"Type.Declaration",
=> .type,
.panic => .type,
.@"panic.call",
.@"panic.sentinelMismatch",
.@"panic.unwrapError",
.@"panic.outOfBounds",
.@"panic.startGreaterThanEnd",
.@"panic.inactiveUnionField",
.@"panic.sliceCastLenRemainder",
.@"panic.reachedUnreachable",
.@"panic.unwrapNull",
.@"panic.castToNull",
.@"panic.incorrectAlignment",
.@"panic.invalidErrorCode",
.@"panic.integerOutOfBounds",
.@"panic.integerOverflow",
.@"panic.shlOverflow",
.@"panic.shrOverflow",
.@"panic.divideByZero",
.@"panic.exactDivisionRemainder",
.@"panic.integerPartOutOfBounds",
.@"panic.corruptSwitch",
.@"panic.shiftRhsTooBig",
.@"panic.invalidEnumValue",
.@"panic.forLenMismatch",
.@"panic.copyLenMismatch",
.@"panic.memcpyAlias",
.@"panic.noreturnReturned",
=> .func,
};
}
/// Resolution of these values is done in four distinct stages:
/// * Resolution of `std.builtin.Panic` and everything under it
/// * Resolution of `VaList`
/// * Resolution of `assembly`
/// * Everything else
///
/// Panics are separated because they are provided by the user, so must be able to use
/// things like reification.
///
/// `VaList` is separate because its value depends on the target, so it needs some reflection
/// machinery to work; additionally, it is `@compileError` on some targets, so must be referenced
/// by itself.
///
/// `assembly` is separate because its value depends on the target.
pub fn stage(decl: BuiltinDecl) InternPool.MemoizedStateStage {
return switch (decl) {
.VaList => .va_list,
.assembly, .@"assembly.Clobbers" => .assembly,
else => {
if (@intFromEnum(decl) <= @intFromEnum(BuiltinDecl.@"Type.Declaration")) {
return .main;
} else {
return .panic;
}
},
};
}
/// Based on the tag name, determines how to access this decl; either as a direct child of the
/// `std.builtin` namespace, or as a child of some preceding `BuiltinDecl` value.
pub fn access(decl: BuiltinDecl) union(enum) {
direct: []const u8,
nested: struct { BuiltinDecl, []const u8 },
} {
@setEvalBranchQuota(2000);
return switch (decl) {
inline else => |tag| {
const name = @tagName(tag);
const split = (comptime std.mem.lastIndexOfScalar(u8, name, '.')) orelse return .{ .direct = name };
const parent = @field(BuiltinDecl, name[0..split]);
comptime assert(@intFromEnum(parent) < @intFromEnum(tag)); // dependencies ordered correctly
return .{ .nested = .{ parent, name[split + 1 ..] } };
},
};
}
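// For example, from the tag names above, `BuiltinDecl.Type.access()` yields
// `.{ .direct = "Type" }`, while `BuiltinDecl.@"Type.Fn".access()` yields
// `.{ .nested = .{ .Type, "Fn" } }`.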
const Memoized = std.enums.EnumArray(BuiltinDecl, InternPool.Index);
};
pub const SimplePanicId = enum {
reached_unreachable,
unwrap_null,
cast_to_null,
incorrect_alignment,
invalid_error_code,
integer_out_of_bounds,
integer_overflow,
shl_overflow,
shr_overflow,
divide_by_zero,
exact_division_remainder,
integer_part_out_of_bounds,
corrupt_switch,
shift_rhs_too_big,
invalid_enum_value,
for_len_mismatch,
copy_len_mismatch,
memcpy_alias,
noreturn_returned,
pub fn toBuiltin(id: SimplePanicId) BuiltinDecl {
return switch (id) {
// zig fmt: off
.reached_unreachable => .@"panic.reachedUnreachable",
.unwrap_null => .@"panic.unwrapNull",
.cast_to_null => .@"panic.castToNull",
.incorrect_alignment => .@"panic.incorrectAlignment",
.invalid_error_code => .@"panic.invalidErrorCode",
.integer_out_of_bounds => .@"panic.integerOutOfBounds",
.integer_overflow => .@"panic.integerOverflow",
.shl_overflow => .@"panic.shlOverflow",
.shr_overflow => .@"panic.shrOverflow",
.divide_by_zero => .@"panic.divideByZero",
.exact_division_remainder => .@"panic.exactDivisionRemainder",
.integer_part_out_of_bounds => .@"panic.integerPartOutOfBounds",
.corrupt_switch => .@"panic.corruptSwitch",
.shift_rhs_too_big => .@"panic.shiftRhsTooBig",
.invalid_enum_value => .@"panic.invalidEnumValue",
.for_len_mismatch => .@"panic.forLenMismatch",
.copy_len_mismatch => .@"panic.copyLenMismatch",
.memcpy_alias => .@"panic.memcpyAlias",
.noreturn_returned => .@"panic.noreturnReturned",
// zig fmt: on
};
}
};
pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
pub const CImportError = struct {
offset: u32,
line: u32,
column: u32,
path: ?[*:0]u8,
source_line: ?[*:0]u8,
msg: [*:0]u8,
pub fn deinit(err: CImportError, gpa: Allocator) void {
if (err.path) |some| gpa.free(std.mem.span(some));
if (err.source_line) |some| gpa.free(std.mem.span(some));
gpa.free(std.mem.span(err.msg));
}
};
pub const ErrorInt = u32;
pub const Exported = union(enum) {
/// The Nav being exported. Note this is *not* the Nav corresponding to the AnalUnit performing the export.
nav: InternPool.Nav.Index,
/// Constant value being exported.
uav: InternPool.Index,
pub fn getValue(exported: Exported, zcu: *Zcu) Value {
return switch (exported) {
.nav => |nav| zcu.navValue(nav),
.uav => |uav| Value.fromInterned(uav),
};
}
pub fn getAlign(exported: Exported, zcu: *Zcu) Alignment {
return switch (exported) {
.nav => |nav| zcu.intern_pool.getNav(nav).resolved.?.@"align",
.uav => .none,
};
}
};
pub const Export = struct {
opts: Options,
src: LazySrcLoc,
exported: Exported,
status: enum {
in_progress,
failed,
/// Indicates that the failure was due to a temporary issue, such as an I/O error
/// when writing to the output file. Retrying the export may succeed.
failed_retryable,
complete,
},
pub const Options = struct {
name: InternPool.NullTerminatedString,
linkage: std.builtin.GlobalLinkage = .strong,
section: InternPool.OptionalNullTerminatedString = .none,
visibility: std.builtin.SymbolVisibility = .default,
};
/// Index into `all_exports`.
pub const Index = enum(u32) {
_,
pub fn ptr(i: Index, zcu: *const Zcu) *Export {
return &zcu.all_exports.items[@intFromEnum(i)];
}
};
};
pub const CompileLogLine = struct {
next: Index.Optional,
/// Does *not* include the trailing newline.
data: InternPool.NullTerminatedString,
pub const Index = enum(u32) {
_,
pub fn get(idx: Index, zcu: *Zcu) *CompileLogLine {
return &zcu.compile_log_lines.items[@intFromEnum(idx)];
}
pub fn toOptional(idx: Index) Optional {
return @enumFromInt(@intFromEnum(idx));
}
pub const Optional = enum(u32) {
none = std.math.maxInt(u32),
_,
pub fn unwrap(opt: Optional) ?Index {
return switch (opt) {
.none => null,
_ => @enumFromInt(@intFromEnum(opt)),
};
}
};
};
};
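// Illustrative sketch (assumes `zcu: *Zcu` and a `unit: AnalUnit` present in
// `compile_logs`; not part of this file's API): visiting every `@compileLog` line
// recorded for one unit by following the singly linked list from `first_line`.
//
//     const entry = zcu.compile_logs.get(unit).?;
//     var it = entry.first_line.toOptional();
//     while (it.unwrap()) |line_idx| {
//         const line = line_idx.get(zcu);
//         // `line.data` holds the logged text, without a trailing newline.
//         it = line.next;
//     }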
pub const Reference = struct {
/// The `AnalUnit` whose semantic analysis was triggered by this reference.
referenced: AnalUnit,
/// Index into `all_references` of the next `Reference` triggered by the same `AnalUnit`.
/// `std.math.maxInt(u32)` is the sentinel.
next: u32,
/// The source location of the reference.
src: LazySrcLoc,
/// If not `.none`, this is the index of the `InlineReferenceFrame` which should appear
/// between the referencer and `referenced` in the reference trace. These frames represent
/// inline calls, which do not create actual references (since they happen in the caller's
/// `AnalUnit`), but do show in the reference trace.
inline_frame: InlineReferenceFrame.Index.Optional,
};
pub const InlineReferenceFrame = struct {
/// The inline *callee*; that is, the function which was called inline.
/// The *caller* is either the callee of `parent`, or else the unit causing the original `Reference`.
callee: InternPool.Index,
/// The source location of the inline call, in the *caller*.
call_src: LazySrcLoc,
/// If not `.none`, a frame which should appear directly below this one.
/// This will be the "parent" inline call; this frame's `callee` is our caller.
parent: InlineReferenceFrame.Index.Optional,
pub const Index = enum(u32) {
_,
pub fn ptr(idx: Index, zcu: *Zcu) *InlineReferenceFrame {
return &zcu.inline_reference_frames.items[@intFromEnum(idx)];
}
pub fn toOptional(idx: Index) Optional {
return @enumFromInt(@intFromEnum(idx));
}
pub const Optional = enum(u32) {
none = std.math.maxInt(u32),
_,
pub fn unwrap(opt: Optional) ?Index {
return switch (opt) {
.none => null,
_ => @enumFromInt(@intFromEnum(opt)),
};
}
};
};
};
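// Illustrative sketch (assumes `zcu: *Zcu` and a `ref: Reference`; not part of this
// file's API): walking the chain of inline-call frames beneath one reference.
//
//     var it = ref.inline_frame;
//     while (it.unwrap()) |frame_idx| {
//         const frame = frame_idx.ptr(zcu);
//         // `frame.callee` was inline-called at `frame.call_src` in its caller.
//         it = frame.parent;
//     }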
pub const TypeReference = struct {
/// The container type which was referenced.
referenced: InternPool.Index,
/// Index into `all_type_references` of the next `TypeReference` triggered by the same `AnalUnit`.
/// `std.math.maxInt(u32)` is the sentinel.
next: u32,
/// The source location of the reference.
src: LazySrcLoc,
};
/// The container that structs, enums, unions, and opaques have.
pub const Namespace = struct {
parent: OptionalIndex,
file_scope: File.Index,
generation: u32,
/// Will be a struct, enum, union, or opaque.
owner_type: InternPool.Index,
/// Members of the namespace which are marked `pub`.
pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
/// Members of the namespace which are *not* marked `pub`.
priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
/// All `comptime` declarations in this namespace. We store these purely so that incremental
/// compilation can re-use the existing `ComptimeUnit`s when a namespace changes.
comptime_decls: std.ArrayList(InternPool.ComptimeUnit.Id) = .empty,
/// All `test` declarations in this namespace. We store these purely so that incremental
/// compilation can re-use the existing `Nav`s when a namespace changes.
test_decls: std.ArrayList(InternPool.Nav.Index) = .empty,
pub const Index = InternPool.NamespaceIndex;
pub const OptionalIndex = InternPool.OptionalNamespaceIndex;
const NavNameContext = struct {
zcu: *Zcu,
pub fn hash(ctx: NavNameContext, nav: InternPool.Nav.Index) u32 {
const name = ctx.zcu.intern_pool.getNav(nav).name;
return std.hash.int(@intFromEnum(name));
}
pub fn eql(ctx: NavNameContext, a_nav: InternPool.Nav.Index, b_nav: InternPool.Nav.Index, b_index: usize) bool {
_ = b_index;
const a_name = ctx.zcu.intern_pool.getNav(a_nav).name;
const b_name = ctx.zcu.intern_pool.getNav(b_nav).name;
return a_name == b_name;
}
};
pub const NameAdapter = struct {
zcu: *Zcu,
pub fn hash(ctx: NameAdapter, s: InternPool.NullTerminatedString) u32 {
_ = ctx;
return std.hash.int(@intFromEnum(s));
}
pub fn eql(ctx: NameAdapter, a: InternPool.NullTerminatedString, b_nav: InternPool.Nav.Index, b_index: usize) bool {
_ = b_index;
return a == ctx.zcu.intern_pool.getNav(b_nav).name;
}
};
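// Illustrative sketch (assumes `ns: *Namespace`, `zcu: *Zcu`, and an interned
// `name: InternPool.NullTerminatedString`; not part of this file's API): resolving
// a `pub` declaration by name through the adapter.
//
//     const adapter: NameAdapter = .{ .zcu = zcu };
//     if (ns.pub_decls.getKeyAdapted(name, adapter)) |nav| {
//         // `nav` is the `InternPool.Nav.Index` whose name matched.
//         _ = nav;
//     }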
pub fn fileScope(ns: Namespace, zcu: *Zcu) *File {
return zcu.fileByIndex(ns.file_scope);
}
pub fn fileScopeIp(ns: Namespace, ip: *InternPool) *File {
return ip.filePtr(ns.file_scope);
}
/// This renders e.g. "std/fs.zig:Dir.OpenOptions"
pub fn renderFullyQualifiedDebugName(
ns: Namespace,
zcu: *Zcu,
name: InternPool.NullTerminatedString,
writer: *Writer,
) Writer.Error!void {
const sep: u8 = if (ns.parent.unwrap()) |parent| sep: {
try zcu.namespacePtr(parent).renderFullyQualifiedDebugName(
zcu,
zcu.declPtr(ns.decl_index).name,
writer,
);
break :sep '.';
} else sep: {
try ns.fileScope(zcu).renderFullyQualifiedDebugName(writer);
break :sep ':';
};
if (name != .empty) try writer.print("{c}{f}", .{ sep, name.fmt(&zcu.intern_pool) });
}
pub fn internFullyQualifiedName(
ns: Namespace,
ip: *InternPool,
gpa: Allocator,
io: Io,
tid: Zcu.PerThread.Id,
name: InternPool.NullTerminatedString,
) !InternPool.NullTerminatedString {
const ns_name = Type.fromInterned(ns.owner_type).containerTypeName(ip);
if (name == .empty) return ns_name;
return ip.getOrPutStringFmt(gpa, io, tid, "{f}.{f}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls);
}
};
pub const File = struct {
status: enum {
/// We have not yet attempted to load this file.
/// `stat` is not populated and may be `undefined`.
never_loaded,
/// A filesystem access failed. It should be retried on the next update.
/// There is guaranteed to be a `failed_files` entry with at least one message.
/// ZIR/ZOIR errors should not be emitted as `zir`/`zoir` is not up-to-date.
/// `stat` is not populated and may be `undefined`.
retryable_failure,
/// This file has failed parsing, AstGen, or ZonGen.
/// There is guaranteed to be a `failed_files` entry, which may or may not have messages.
/// ZIR/ZOIR errors *should* be emitted as `zir`/`zoir` is up-to-date.
/// `stat` is populated.
astgen_failure,
/// Parsing and AstGen/ZonGen of this file has succeeded.
/// There may still be a `failed_files` entry, e.g. for non-fatal AstGen errors.
/// `stat` is populated.
success,
},
/// Whether this is populated depends on `status`.
stat: Cache.File.Stat,
/// Whether this file is the generated file of a "builtin" module. This matters because those
/// files are generated and stored in memory rather than being read off-disk. The rest of the
/// pipeline generally shouldn't care about this.
is_builtin: bool,
/// The path of this file. It is important that this path has a "canonical form" because files
/// are deduplicated based on path; `Compilation.Path` guarantees this. Owned by this `File`,
/// allocated into `gpa`.
path: Compilation.Path,
/// Populated only when emitting error messages; see `getSource`.
source: ?[:0]const u8,
/// Populated only when emitting error messages; see `getTree`.
tree: ?Ast,
zir: ?Zir,
zoir: ?Zoir,
/// Module that this file is a part of, managed externally.
/// This is initially `null`. After AstGen, a pass is run to determine which module each
/// file belongs to, at which point this field is set. It is never set to `null` again;
/// this is so that if the file starts belonging to a different module instead, we can
/// tell, and invalidate dependencies as needed (see `module_changed`).
/// During semantic analysis, this is always non-`null` for alive files (i.e. those which
/// have imports targeting them).
mod: ?*Package.Module,
/// Relative to the root directory of `mod`. If `mod == null`, this field is `undefined`.
/// This memory is managed externally and must not be directly freed.
/// Its lifetime is at least equal to that of this `File`.
sub_file_path: []const u8,
/// If this file's module identity changes on an incremental update, this flag is set to signal
/// to `Zcu.updateZirRefs` that all references to this file must be invalidated. This matters
/// because changing your module changes things like your optimization mode and codegen flags,
/// so everything needs to be re-done. `updateZirRefs` is responsible for resetting this flag.
module_changed: bool,
/// The ZIR for this file from the last update with no file failures. As such, this ZIR is never
/// failed (although it may have compile errors).
///
/// Because updates with file failures do not perform ZIR mapping or semantic analysis, we keep
/// this around so we have the "old" ZIR to map when an update is ready to do so. Once such an
/// update occurs, this field is unloaded, since it is no longer necessary.
///
/// In other words, if `TrackedInst`s are tied to ZIR other than what's in the `zir` field, this
/// field is populated with that old ZIR.
prev_zir: ?*Zir,
/// This field serves a similar purpose to `prev_zir`, but for ZOIR. However, since we do not
/// need to map old ZOIR to new ZOIR -- instead only invalidating dependencies if the ZOIR
/// changed -- this field is just a simple boolean.
///
/// When `zoir` is updated, this field is set to `true`. In `updateZirRefs`, if this is `true`,
/// we invalidate the corresponding `source_file` dependency, and reset it to `false`.
zoir_invalidated: bool,
pub const Path = struct {
root: enum {
cwd,
fs_root,
local_cache,
global_cache,
lib_dir,
},
};
/// A single reference to a file.
pub const Reference = union(enum) {
analysis_root: *Package.Module,
import: struct {
importer: Zcu.File.Index,
tok: Ast.TokenIndex,
/// If the file is imported as the root of a module, this is that module.
/// `null` means the file was imported directly by path.
module: ?*Package.Module,
},
};
pub fn getMode(self: File) Ast.Mode {
// We never create a `File` whose path doesn't give a mode.
return modeFromPath(self.path.sub_path).?;
}
pub fn modeFromPath(path: []const u8) ?Ast.Mode {
if (std.mem.endsWith(u8, path, ".zon")) {
return .zon;
} else if (std.mem.endsWith(u8, path, ".zig")) {
return .zig;
} else {
return null;
}
}
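// For instance, `modeFromPath("foo/bar.zig")` is `.zig`, `modeFromPath("build.zig.zon")`
// is `.zon`, and `modeFromPath("README.md")` is `null`.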
pub fn unload(file: *File, gpa: Allocator) void {
if (file.zoir) |zoir| zoir.deinit(gpa);
file.unloadTree(gpa);
file.unloadSource(gpa);
file.unloadZir(gpa);
}
pub fn unloadTree(file: *File, gpa: Allocator) void {
if (file.tree) |*tree| {
tree.deinit(gpa);
file.tree = null;
}
}
pub fn unloadSource(file: *File, gpa: Allocator) void {
if (file.source) |source| {
gpa.free(source);
file.source = null;
}
}
pub fn unloadZir(file: *File, gpa: Allocator) void {
if (file.zir) |*zir| {
zir.deinit(gpa);
file.zir = null;
}
}
pub const GetSourceError = error{
OutOfMemory,
FileChanged,
} || std.Io.File.OpenError || std.Io.File.Reader.Error;
/// This must only be called in error conditions where `stat` *is* populated. It returns the
/// contents of the source file, assuming the stat has not changed since it was originally
/// loaded.
pub fn getSource(file: *File, zcu: *const Zcu) GetSourceError![:0]const u8 {
const gpa = zcu.gpa;
const io = zcu.comp.io;
if (file.source) |source| return source;
switch (file.status) {
.never_loaded => unreachable, // stat must be populated
.retryable_failure => unreachable, // stat must be populated
.astgen_failure, .success => {},
}
assert(file.stat.size <= std.math.maxInt(u32)); // `PerThread.updateFile` checks this
var f = f: {
const dir, const sub_path = file.path.openInfo(zcu.comp.dirs);
break :f try dir.openFile(io, sub_path, .{});
};
defer f.close(io);
const stat = f.stat(io) catch |err| switch (err) {
error.Streaming => {
// Since `file.stat` is populated, this was previously a file stream; since it is
// now not a file stream, it must have changed.
return error.FileChanged;
},
else => |e| return e,
};
if (stat.inode != file.stat.inode or
stat.size != file.stat.size or
stat.mtime.nanoseconds != file.stat.mtime.nanoseconds)
{
return error.FileChanged;
}
const source = try gpa.allocSentinel(u8, @intCast(file.stat.size), 0);
errdefer gpa.free(source);
var file_reader = f.reader(io, &.{});
file_reader.size = stat.size;
file_reader.interface.readSliceAll(source) catch return file_reader.err.?;
file.source = source;
errdefer comptime unreachable; // don't error after populating `source`
return source;
}
/// This must only be called in error conditions where `stat` *is* populated. It returns the
/// parsed AST of the source file, assuming the stat has not changed since it was originally
/// loaded.
pub fn getTree(file: *File, zcu: *const Zcu) GetSourceError!*const Ast {
if (file.tree) |*tree| return tree;
const source = try file.getSource(zcu);
file.tree = try .parse(zcu.gpa, source, file.getMode());
return &file.tree.?;
}
pub fn fullyQualifiedNameLen(file: File) usize {
const ext = std.fs.path.extension(file.sub_file_path);
return file.sub_file_path.len - ext.len;
}
pub fn renderFullyQualifiedName(file: File, writer: *Writer) !void {
// Convert all the slashes into dots and truncate the extension.
const ext = std.fs.path.extension(file.sub_file_path);
const noext = file.sub_file_path[0 .. file.sub_file_path.len - ext.len];
for (noext) |byte| switch (byte) {
'/', '\\' => try writer.writeByte('.'),
else => try writer.writeByte(byte),
};
}
pub fn renderFullyQualifiedDebugName(file: File, writer: *Writer) !void {
for (file.sub_file_path) |byte| switch (byte) {
'/', '\\' => try writer.writeByte('/'),
else => try writer.writeByte(byte),
};
}
pub fn internFullyQualifiedName(file: File, pt: Zcu.PerThread) !InternPool.NullTerminatedString {
const ip = &pt.zcu.intern_pool;
const comp = pt.zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
const string_bytes = ip.getLocal(pt.tid).getMutableStringBytes(gpa, io);
var w: Writer = .fixed((try string_bytes.addManyAsSlice(file.fullyQualifiedNameLen()))[0]);
file.renderFullyQualifiedName(&w) catch unreachable;
assert(w.end == w.buffer.len);
return ip.getOrPutTrailingString(gpa, io, pt.tid, @intCast(w.end), .no_embedded_nulls);
}
pub const Index = InternPool.FileIndex;
pub fn errorBundleWholeFileSrc(
file: *File,
zcu: *const Zcu,
eb: *std.zig.ErrorBundle.Wip,
) Allocator.Error!std.zig.ErrorBundle.SourceLocationIndex {
return eb.addSourceLocation(.{
.src_path = try eb.printString("{f}", .{file.path.fmt(zcu.comp)}),
.span_start = 0,
.span_main = 0,
.span_end = 0,
.line = 0,
.column = 0,
.source_line = 0,
});
}
/// Asserts that the tree has already been loaded with `getTree`.
pub fn errorBundleTokenSrc(
file: *File,
tok: Ast.TokenIndex,
zcu: *const Zcu,
eb: *std.zig.ErrorBundle.Wip,
) Allocator.Error!std.zig.ErrorBundle.SourceLocationIndex {
const tree = &file.tree.?;
const start = tree.tokenStart(tok);
const end = start + tree.tokenSlice(tok).len;
const loc = std.zig.findLineColumn(file.source.?, start);
return eb.addSourceLocation(.{
.src_path = try eb.printString("{f}", .{file.path.fmt(zcu.comp)}),
.span_start = start,
.span_main = start,
.span_end = @intCast(end),
.line = @intCast(loc.line),
.column = @intCast(loc.column),
.source_line = try eb.addString(loc.source_line),
});
}
};
/// Represents the contents of a file loaded with `@embedFile`.
pub const EmbedFile = struct {
path: Compilation.Path,
/// `.none` means the file was not loaded, so `stat` is undefined.
val: InternPool.Index,
/// If this is `null` and `val` is `.none`, the file has never been loaded.
err: ?(Io.File.OpenError || Io.File.StatError || Io.File.Reader.Error || error{UnexpectedEof}),
stat: Cache.File.Stat,
pub const Index = enum(u32) {
_,
pub fn get(idx: Index, zcu: *const Zcu) *EmbedFile {
return zcu.embed_table.keys()[@intFromEnum(idx)];
}
};
};
/// This struct holds data necessary to construct API-facing `AllErrors.Message`.
/// Its memory is managed with the general purpose allocator so that messages
/// can be created and destroyed in response to incremental updates.
pub const ErrorMsg = struct {
src_loc: LazySrcLoc,
msg: []const u8,
notes: []ErrorMsg = &.{},
reference_trace_root: AnalUnit.Optional = .none,
pub fn order(lhs: *const ErrorMsg, rhs: *const ErrorMsg, zcu: *Zcu) std.math.Order {
return lhs.src_loc.order(rhs.src_loc, zcu).differ() orelse
std.mem.order(u8, lhs.msg, rhs.msg).differ() orelse
std.math.order(lhs.notes.len, rhs.notes.len).differ() orelse
for (lhs.notes, rhs.notes) |*lhs_note, *rhs_note| {
if (order(lhs_note, rhs_note, zcu).differ()) |o| break o;
} else .eq;
}
pub fn create(
gpa: Allocator,
src_loc: LazySrcLoc,
comptime format: []const u8,
args: anytype,
) !*ErrorMsg {
assert(src_loc.offset != .unneeded);
const err_msg = try gpa.create(ErrorMsg);
errdefer gpa.destroy(err_msg);
err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args);
return err_msg;
}
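// Illustrative sketch (assumes `gpa: Allocator`, a `src_loc: LazySrcLoc`, and some
// `count: usize`; not part of this file's API). The caller owns the message until it
// is handed off to one of the `failed_*` maps:
//
//     const msg = try ErrorMsg.create(gpa, src_loc, "expected {d} arguments", .{count});
//     errdefer msg.destroy(gpa);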
/// Assumes the ErrorMsg struct and msg were both allocated with `gpa`,
/// as well as all notes.
pub fn destroy(err_msg: *ErrorMsg, gpa: Allocator) void {
err_msg.deinit(gpa);
gpa.destroy(err_msg);
}
pub fn init(gpa: Allocator, src_loc: LazySrcLoc, comptime format: []const u8, args: anytype) !ErrorMsg {
return .{
.src_loc = src_loc,
.msg = try std.fmt.allocPrint(gpa, format, args),
};
}
pub fn deinit(err_msg: *ErrorMsg, gpa: Allocator) void {
for (err_msg.notes) |*note| {
note.deinit(gpa);
}
gpa.free(err_msg.notes);
gpa.free(err_msg.msg);
err_msg.* = undefined;
}
};
pub const AstGenSrc = union(enum) {
root,
import: struct {
importing_file: Zcu.File.Index,
import_tok: std.zig.Ast.TokenIndex,
},
};
/// Canonical reference to a position within a source file.
pub const SrcLoc = struct {
file_scope: *File,
base_node: Ast.Node.Index,
/// Relative to `base_node`.
lazy: LazySrcLoc.Offset,
pub fn baseSrcToken(src_loc: SrcLoc) Ast.TokenIndex {
const tree = src_loc.file_scope.tree.?;
return tree.firstToken(src_loc.base_node);
}
pub const Span = Ast.Span;
pub fn span(src_loc: SrcLoc, zcu: *const Zcu) !Span {
switch (src_loc.lazy) {
.unneeded => unreachable,
.byte_abs => |byte_index| return Span{ .start = byte_index, .end = byte_index + 1, .main = byte_index },
.token_abs => |tok_index| {
const tree = try src_loc.file_scope.getTree(zcu);
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.node_abs => |node| {
const tree = try src_loc.file_scope.getTree(zcu);
return tree.nodeToSpan(node);
},
.byte_offset => |byte_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const tok_index = src_loc.baseSrcToken();
const start = tree.tokenStart(tok_index) + byte_off;
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.token_offset => |tok_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const tok_index = tok_off.toAbsolute(src_loc.baseSrcToken());
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.node_offset => |traced_off| {
const node_off = traced_off.x;
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(node);
},
.node_offset_main_token => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const main_token = tree.nodeMainToken(node);
return tree.tokensToSpan(main_token, main_token, main_token);
},
.node_offset_bin_op => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(node);
},
.node_offset_initializer => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.tokensToSpan(
tree.firstToken(node) - 3,
tree.lastToken(node),
tree.nodeMainToken(node) - 2,
);
},
.node_offset_var_decl_ty => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = switch (tree.nodeTag(node)) {
.global_var_decl,
.local_var_decl,
.simple_var_decl,
.aligned_var_decl,
=> tree.fullVarDecl(node).?,
else => unreachable,
};
if (full.ast.type_node.unwrap()) |type_node| {
return tree.nodeToSpan(type_node);
}
const tok_index = full.ast.mut_token + 1; // the name token
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.node_offset_var_decl_align => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const align_node = if (tree.fullVarDecl(node)) |v|
v.ast.align_node.unwrap().?
else if (tree.fullFnProto(&buf, node)) |f|
f.ast.align_expr.unwrap().?
else
unreachable;
return tree.nodeToSpan(align_node);
},
.node_offset_var_decl_section => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const section_node = if (tree.fullVarDecl(node)) |v|
v.ast.section_node.unwrap().?
else if (tree.fullFnProto(&buf, node)) |f|
f.ast.section_expr.unwrap().?
else
unreachable;
return tree.nodeToSpan(section_node);
},
.node_offset_var_decl_addrspace => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const addrspace_node = if (tree.fullVarDecl(node)) |v|
v.ast.addrspace_node.unwrap().?
else if (tree.fullFnProto(&buf, node)) |f|
f.ast.addrspace_expr.unwrap().?
else
unreachable;
return tree.nodeToSpan(addrspace_node);
},
.node_offset_var_decl_init => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const init_node = switch (tree.nodeTag(node)) {
.global_var_decl,
.local_var_decl,
.aligned_var_decl,
.simple_var_decl,
=> tree.fullVarDecl(node).?.ast.init_node.unwrap().?,
.assign_destructure => tree.assignDestructure(node).ast.value_expr,
else => unreachable,
};
return tree.nodeToSpan(init_node);
},
.node_offset_builtin_call_arg => |builtin_arg| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = builtin_arg.builtin_call_node.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
const params = tree.builtinCallParams(&buf, node).?;
return tree.nodeToSpan(params[builtin_arg.arg_index]);
},
.node_offset_ptrcast_operand => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
var node = node_off.toAbsolute(src_loc.base_node);
while (true) {
switch (tree.nodeTag(node)) {
.builtin_call_two, .builtin_call_two_comma => {},
else => break,
}
const first_arg, const second_arg = tree.nodeData(node).opt_node_and_opt_node;
if (first_arg == .none) break; // 0 args
if (second_arg != .none) break; // 2 args
const builtin_token = tree.nodeMainToken(node);
const builtin_name = tree.tokenSlice(builtin_token);
const info = BuiltinFn.list.get(builtin_name) orelse break;
switch (info.tag) {
else => break,
.ptr_cast,
.align_cast,
.addrspace_cast,
.const_cast,
.volatile_cast,
=> {},
}
node = first_arg.unwrap().?;
}
return tree.nodeToSpan(node);
},
.node_offset_array_access_index => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]);
},
.node_offset_slice_ptr,
.node_offset_slice_start,
.node_offset_slice_end,
.node_offset_slice_sentinel,
=> |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullSlice(node).?;
const part_node = switch (src_loc.lazy) {
.node_offset_slice_ptr => full.ast.sliced,
.node_offset_slice_start => full.ast.start,
.node_offset_slice_end => full.ast.end.unwrap().?,
.node_offset_slice_sentinel => full.ast.sentinel.unwrap().?,
else => unreachable,
};
return tree.nodeToSpan(part_node);
},
.node_offset_call_func => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullCall(&buf, node).?;
return tree.nodeToSpan(full.ast.fn_expr);
},
.node_offset_field_name => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const tok_index = switch (tree.nodeTag(node)) {
.field_access => tree.nodeData(node).node_and_token[1],
.call_one,
.call_one_comma,
.call,
.call_comma,
=> blk: {
const full = tree.fullCall(&buf, node).?;
break :blk tree.lastToken(full.ast.fn_expr);
},
else => tree.firstToken(node) - 2,
};
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.node_offset_field_name_init => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const tok_index = tree.firstToken(node) - 2;
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.node_offset_deref_ptr => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(node);
},
.node_offset_asm_source => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullAsm(node).?;
return tree.nodeToSpan(full.ast.template);
},
.node_offset_asm_ret_ty => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullAsm(node).?;
const asm_output = full.outputs[0];
return tree.nodeToSpan(tree.nodeData(asm_output).opt_node_and_token[0].unwrap().?);
},
.node_offset_if_cond => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const src_node = switch (tree.nodeTag(node)) {
.if_simple,
.@"if",
=> tree.fullIf(node).?.ast.cond_expr,
.while_simple,
.while_cont,
.@"while",
=> tree.fullWhile(node).?.ast.cond_expr,
.for_simple,
.@"for",
=> {
const inputs = tree.fullFor(node).?.ast.inputs;
const start = tree.firstToken(inputs[0]);
const end = tree.lastToken(inputs[inputs.len - 1]);
return tree.tokensToSpan(start, end, start);
},
.@"orelse" => node,
.@"catch" => node,
else => unreachable,
};
return tree.nodeToSpan(src_node);
},
.asm_input => |input| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = input.offset.toAbsolute(src_loc.base_node);
const full = tree.fullAsm(node).?;
const asm_input = full.inputs[input.input_index];
return tree.nodeToSpan(tree.nodeData(asm_input).node_and_token[0]);
},
.asm_output => |output| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = output.offset.toAbsolute(src_loc.base_node);
const full = tree.fullAsm(node).?;
const asm_output = full.outputs[output.output_index];
const data = tree.nodeData(asm_output).opt_node_and_token;
return if (data[0].unwrap()) |output_node|
tree.nodeToSpan(output_node)
else
// token points to the ')'
tree.tokenToSpan(data[1] - 1);
},
.asm_clobbers => |offset| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = offset.toAbsolute(src_loc.base_node);
const full = tree.fullAsm(node).?;
return tree.nodeToSpan(full.ast.clobbers.unwrap().?); // this should only be reachable if the clobbers are written in the source
},
.for_input => |for_input| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = for_input.for_node_offset.toAbsolute(src_loc.base_node);
const for_full = tree.fullFor(node).?;
const src_node = for_full.ast.inputs[for_input.input_index];
return tree.nodeToSpan(src_node);
},
.for_capture_from_input => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const input_node = node_off.toAbsolute(src_loc.base_node);
// We have to actually linearly scan the whole AST to find the for loop
// that contains this input.
const node_tags = tree.nodes.items(.tag);
for (node_tags, 0..) |node_tag, node_usize| {
const node: Ast.Node.Index = @enumFromInt(node_usize);
switch (node_tag) {
.for_simple, .@"for" => {
const for_full = tree.fullFor(node).?;
for (for_full.ast.inputs, 0..) |input, input_index| {
if (input_node == input) {
var count = input_index;
var tok = for_full.payload_token;
while (true) {
switch (tree.tokenTag(tok)) {
.comma => {
count -= 1;
tok += 1;
},
.identifier => {
if (count == 0)
return tree.tokensToSpan(tok, tok + 1, tok);
tok += 1;
},
.asterisk => {
if (count == 0)
return tree.tokensToSpan(tok, tok + 2, tok);
tok += 1;
},
else => unreachable,
}
}
}
}
},
else => continue,
}
} else unreachable;
},
.call_arg => |call_arg| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = call_arg.call_node_offset.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
const call_full = tree.fullCall(buf[0..1], node) orelse {
assert(tree.nodeTag(node) == .builtin_call);
const call_args_node: Ast.Node.Index = @enumFromInt(tree.extra_data[@intFromEnum(tree.nodeData(node).extra_range.end) - 1]);
switch (tree.nodeTag(call_args_node)) {
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
=> {
const full = tree.fullArrayInit(&buf, call_args_node).?.ast.elements;
return tree.nodeToSpan(full[call_arg.arg_index]);
},
.struct_init_one,
.struct_init_one_comma,
.struct_init_dot_two,
.struct_init_dot_two_comma,
.struct_init_dot,
.struct_init_dot_comma,
.struct_init,
.struct_init_comma,
=> {
const full = tree.fullStructInit(&buf, call_args_node).?.ast.fields;
return tree.nodeToSpan(full[call_arg.arg_index]);
},
else => return tree.nodeToSpan(call_args_node),
}
};
return tree.nodeToSpan(call_full.ast.params[call_arg.arg_index]);
},
.fn_proto_param, .fn_proto_param_type => |fn_proto_param| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = fn_proto_param.fn_proto_node_offset.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
var it = full.iterate(tree);
var i: usize = 0;
while (it.next()) |param| : (i += 1) {
if (i != fn_proto_param.param_index) continue;
switch (src_loc.lazy) {
.fn_proto_param_type => if (param.anytype_ellipsis3) |tok| {
return tree.tokenToSpan(tok);
} else {
return tree.nodeToSpan(param.type_expr.?);
},
.fn_proto_param => if (param.anytype_ellipsis3) |tok| {
const first = param.comptime_noalias orelse param.name_token orelse tok;
return tree.tokensToSpan(first, tok, first);
} else {
const first = param.comptime_noalias orelse param.name_token orelse tree.firstToken(param.type_expr.?);
return tree.tokensToSpan(first, tree.lastToken(param.type_expr.?), first);
},
else => unreachable,
}
}
unreachable;
},
.node_offset_bin_lhs => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node_and_node[0]);
},
.node_offset_bin_rhs => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]);
},
.array_cat_lhs, .array_cat_rhs => |cat| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = cat.array_cat_offset.toAbsolute(src_loc.base_node);
const arr_node = if (src_loc.lazy == .array_cat_lhs)
tree.nodeData(node).node_and_node[0]
else
tree.nodeData(node).node_and_node[1];
var buf: [2]Ast.Node.Index = undefined;
switch (tree.nodeTag(arr_node)) {
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
=> {
const full = tree.fullArrayInit(&buf, arr_node).?.ast.elements;
return tree.nodeToSpan(full[cat.elem_index]);
},
else => return tree.nodeToSpan(arr_node),
}
},
.node_offset_try_operand => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node);
},
.node_offset_switch_operand => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const condition, _ = tree.nodeData(node).node_and_extra;
return tree.nodeToSpan(condition);
},
.node_offset_switch_else_prong => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = node_off.toAbsolute(src_loc.base_node);
_, const extra_index = tree.nodeData(switch_node).node_and_extra;
const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
for (case_nodes) |case_node| {
const case = tree.fullSwitchCase(case_node).?;
if (case.ast.values.len == 0) {
return tree.nodeToSpan(case_node);
}
} else unreachable;
},
.node_offset_switch_range => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = node_off.toAbsolute(src_loc.base_node);
_, const extra_index = tree.nodeData(switch_node).node_and_extra;
const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
for (case_nodes) |case_node| {
const case = tree.fullSwitchCase(case_node).?;
for (case.ast.values) |item_node| {
if (tree.nodeTag(item_node) == .switch_range) {
return tree.nodeToSpan(item_node);
}
}
} else unreachable;
},
.node_offset_fn_type_align => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.align_expr.unwrap() orelse node);
},
.node_offset_fn_type_addrspace => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.addrspace_expr.unwrap() orelse node);
},
.node_offset_fn_type_section => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.section_expr.unwrap() orelse node);
},
.node_offset_fn_type_cc => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.callconv_expr.unwrap() orelse node);
},
.node_offset_fn_type_ret_ty => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.return_type.unwrap().?);
},
.node_offset_param => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var first_tok = tree.firstToken(node);
while (true) switch (tree.tokenTag(first_tok - 1)) {
.colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1,
else => break,
};
return tree.tokensToSpan(
first_tok,
tree.lastToken(node),
first_tok,
);
},
.token_offset_param => |token_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const main_token = tree.nodeMainToken(src_loc.base_node);
const tok_index = token_off.toAbsolute(main_token);
var first_tok = tok_index;
while (true) switch (tree.tokenTag(first_tok - 1)) {
.colon, .identifier, .keyword_comptime, .keyword_noalias => first_tok -= 1,
else => break,
};
return tree.tokensToSpan(
first_tok,
tok_index,
first_tok,
);
},
.node_offset_anyframe_type => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
_, const child_type = tree.nodeData(parent_node).token_and_node;
return tree.nodeToSpan(child_type);
},
.node_offset_lib_name => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, parent_node).?;
const tok_index = full.lib_name.?;
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.node_offset_array_type_len => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullArrayType(parent_node).?;
return tree.nodeToSpan(full.ast.elem_count);
},
.node_offset_array_type_sentinel => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullArrayType(parent_node).?;
return tree.nodeToSpan(full.ast.sentinel.unwrap().?);
},
.node_offset_array_type_elem => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullArrayType(parent_node).?;
return tree.nodeToSpan(full.ast.elem_type);
},
.node_offset_un_op => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node);
},
.node_offset_ptr_elem => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.child_type);
},
.node_offset_ptr_sentinel => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.sentinel.unwrap().?);
},
.node_offset_ptr_align => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.align_node.unwrap().?);
},
.node_offset_ptr_addrspace => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.addrspace_node.unwrap().?);
},
.node_offset_ptr_bitoffset => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.bit_range_start.unwrap().?);
},
.node_offset_ptr_hostsize => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.bit_range_end.unwrap().?);
},
.node_offset_init_ty => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
const type_expr = if (tree.fullArrayInit(&buf, parent_node)) |array_init|
array_init.ast.type_expr.unwrap().?
else
tree.fullStructInit(&buf, parent_node).?.ast.type_expr.unwrap().?;
return tree.nodeToSpan(type_expr);
},
.node_offset_store_ptr => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
switch (tree.nodeTag(node)) {
.assign,
.assign_mul,
.assign_div,
.assign_mod,
.assign_add,
.assign_sub,
.assign_shl,
.assign_shl_sat,
.assign_shr,
.assign_bit_and,
.assign_bit_xor,
.assign_bit_or,
.assign_mul_wrap,
.assign_add_wrap,
.assign_sub_wrap,
.assign_mul_sat,
.assign_add_sat,
.assign_sub_sat,
=> return tree.nodeToSpan(tree.nodeData(node).node_and_node[0]),
else => return tree.nodeToSpan(node),
}
},
.node_offset_store_operand => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
switch (tree.nodeTag(node)) {
.assign,
.assign_mul,
.assign_div,
.assign_mod,
.assign_add,
.assign_sub,
.assign_shl,
.assign_shl_sat,
.assign_shr,
.assign_bit_and,
.assign_bit_xor,
.assign_bit_or,
.assign_mul_wrap,
.assign_add_wrap,
.assign_sub_wrap,
.assign_mul_sat,
.assign_add_sat,
.assign_sub_sat,
=> return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]),
else => return tree.nodeToSpan(node),
}
},
.node_offset_return_operand => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
if (tree.nodeTag(node) == .@"return") {
if (tree.nodeData(node).opt_node.unwrap()) |lhs| {
return tree.nodeToSpan(lhs);
}
}
return tree.nodeToSpan(node);
},
.container_arg => {
const tree = try src_loc.file_scope.getTree(zcu);
const node = src_loc.base_node;
var buf: [2]Ast.Node.Index = undefined;
if (tree.fullContainerDecl(&buf, node)) |container_decl| {
const arg_node = container_decl.ast.arg.unwrap() orelse return tree.nodeToSpan(node);
return tree.nodeToSpan(arg_node);
} else if (tree.builtinCallParams(&buf, node)) |args| {
// Builtin calls (`@Enum`, etc.) should use the first argument.
return tree.nodeToSpan(if (args.len > 0) args[0] else node);
} else {
return tree.nodeToSpan(node);
}
},
.container_field_name,
.container_field_value,
.container_field_type,
.container_field_align,
=> |field_idx| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = src_loc.base_node;
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse {
// This could be a reification builtin. These are the args we care about:
// * `@Enum(_, _, names, values)`
// * `@Struct(_, _, names, types, values_and_aligns)`
// * `@Union(_, _, names, types, aligns)`
if (tree.builtinCallParams(&buf, node)) |args| {
const builtin_name = tree.tokenSlice(tree.firstToken(node));
const arg_index: ?u3 = if (std.mem.eql(u8, builtin_name, "@Enum")) switch (src_loc.lazy) {
.container_field_name => 2,
.container_field_value => 3,
.container_field_type => null,
.container_field_align => null,
else => unreachable,
} else if (std.mem.eql(u8, builtin_name, "@Struct")) switch (src_loc.lazy) {
.container_field_name => 2,
.container_field_value => 4,
.container_field_type => 3,
.container_field_align => 4,
else => unreachable,
} else if (std.mem.eql(u8, builtin_name, "@Union")) switch (src_loc.lazy) {
.container_field_name => 2,
.container_field_value => 4,
.container_field_type => 3,
.container_field_align => null,
else => unreachable,
} else null;
if (arg_index) |i| {
if (args.len > i) return tree.nodeToSpan(args[i]);
}
}
return tree.nodeToSpan(node);
};
var cur_field_idx: usize = 0;
for (container_decl.ast.members) |member_node| {
const field = tree.fullContainerField(member_node) orelse continue;
if (cur_field_idx < field_idx) {
cur_field_idx += 1;
continue;
}
const field_component_node = switch (src_loc.lazy) {
.container_field_name => .none,
.container_field_value => field.ast.value_expr,
.container_field_type => field.ast.type_expr,
.container_field_align => field.ast.align_expr,
else => unreachable,
};
if (field_component_node.unwrap()) |component_node| {
return tree.nodeToSpan(component_node);
} else {
return tree.tokenToSpan(field.ast.main_token);
}
} else unreachable;
},
.tuple_field_type, .tuple_field_init => |field_info| {
const tree = try src_loc.file_scope.getTree(zcu);
const node = field_info.tuple_decl_node_offset.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse
return tree.nodeToSpan(node);
const field = tree.fullContainerField(container_decl.ast.members[field_info.elem_index]).?;
return tree.nodeToSpan(switch (src_loc.lazy) {
.tuple_field_type => field.ast.type_expr.unwrap().?,
.tuple_field_init => field.ast.value_expr.unwrap().?,
else => unreachable,
});
},
.init_elem => |init_elem| {
const tree = try src_loc.file_scope.getTree(zcu);
const init_node = init_elem.init_node_offset.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
if (tree.fullArrayInit(&buf, init_node)) |full| {
const elem_node = full.ast.elements[init_elem.elem_index];
return tree.nodeToSpan(elem_node);
} else if (tree.fullStructInit(&buf, init_node)) |full| {
const field_node = full.ast.fields[init_elem.elem_index];
return tree.tokensToSpan(
tree.firstToken(field_node) - 3,
tree.lastToken(field_node),
tree.nodeMainToken(field_node) - 2,
);
} else unreachable;
},
.init_field_name,
.init_field_linkage,
.init_field_section,
.init_field_visibility,
.init_field_rw,
.init_field_locality,
.init_field_cache,
.init_field_library,
.init_field_thread_local,
.init_field_dll_import,
.init_field_relocation,
.init_field_decoration,
=> |builtin_call_node| {
const wanted = switch (src_loc.lazy) {
.init_field_name => "name",
.init_field_linkage => "linkage",
.init_field_section => "section",
.init_field_visibility => "visibility",
.init_field_rw => "rw",
.init_field_locality => "locality",
.init_field_cache => "cache",
.init_field_library => "library",
.init_field_thread_local => "thread_local",
.init_field_dll_import => "dll_import",
.init_field_relocation => "relocation",
.init_field_decoration => "decoration",
else => unreachable,
};
const tree = try src_loc.file_scope.getTree(zcu);
const node = builtin_call_node.toAbsolute(src_loc.base_node);
var builtin_buf: [2]Ast.Node.Index = undefined;
const args = tree.builtinCallParams(&builtin_buf, node).?;
const arg_node = args[1];
var buf: [2]Ast.Node.Index = undefined;
const full = tree.fullStructInit(&buf, arg_node) orelse
return tree.nodeToSpan(arg_node);
for (full.ast.fields) |field_node| {
// . IDENTIFIER = field_node
const name_token = tree.firstToken(field_node) - 2;
const name = tree.tokenSlice(name_token);
if (std.mem.eql(u8, name, wanted)) {
return tree.tokensToSpan(
name_token - 1,
tree.lastToken(field_node),
tree.nodeMainToken(field_node) - 2,
);
}
}
return tree.nodeToSpan(arg_node);
},
.switch_case_item,
.switch_case_item_range_first,
.switch_case_item_range_last,
.switch_capture,
.switch_tag_capture,
=> {
const switch_node_offset, const want_case_idx = switch (src_loc.lazy) {
.switch_case_item,
.switch_case_item_range_first,
.switch_case_item_range_last,
=> |x| .{ x.switch_node_offset, x.case_idx },
.switch_capture,
.switch_tag_capture,
=> |x| .{ x.switch_node_offset, x.case_idx },
else => unreachable,
};
const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = switch_node_offset.toAbsolute(src_loc.base_node);
_, const extra_index = tree.nodeData(switch_node).node_and_extra;
const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
var multi_i: u32 = 0;
var scalar_i: u32 = 0;
const case: Ast.full.SwitchCase = case: for (case_nodes) |case_node| {
const case = tree.fullSwitchCase(case_node).?;
if (case.ast.values.len == 0) {
if (want_case_idx == Zir.UnwrappedSwitchBlock.Case.Index.@"else") {
break :case case;
}
continue :case;
}
const is_multi = case.ast.values.len != 1 or
tree.nodeTag(case.ast.values[0]) == .switch_range;
switch (want_case_idx.kind) {
.scalar => if (!is_multi and want_case_idx.value == scalar_i)
break :case case,
.multi => if (is_multi and want_case_idx.value == multi_i)
break :case case,
}
if (is_multi) {
multi_i += 1;
} else {
scalar_i += 1;
}
} else unreachable;
const want_item_idx = switch (src_loc.lazy) {
.switch_case_item,
.switch_case_item_range_first,
.switch_case_item_range_last,
=> |x| item_idx: {
assert(want_case_idx != Zir.UnwrappedSwitchBlock.Case.Index.@"else");
break :item_idx x.item_idx;
},
.switch_capture, .switch_tag_capture => {
const start = switch (src_loc.lazy) {
.switch_capture => case.payload_token.?,
.switch_tag_capture => tok: {
var tok = case.payload_token.?;
if (tree.tokenTag(tok) == .asterisk) tok += 1;
tok = tok + 2; // skip the capture name and the comma
break :tok tok;
},
else => unreachable,
};
const end = switch (tree.tokenTag(start)) {
.asterisk => start + 1,
else => start,
};
return tree.tokensToSpan(start, end, start);
},
else => unreachable,
};
switch (want_item_idx.kind) {
.single => {
var item_i: u32 = 0;
for (case.ast.values) |item_node| {
if (tree.nodeTag(item_node) == .switch_range) {
continue;
}
if (item_i != want_item_idx.value) {
item_i += 1;
continue;
}
return tree.nodeToSpan(item_node);
} else unreachable;
},
.range => {
var range_i: u32 = 0;
for (case.ast.values) |item_node| {
if (tree.nodeTag(item_node) != .switch_range) {
continue;
}
if (range_i != want_item_idx.value) {
range_i += 1;
continue;
}
const first, const last = tree.nodeData(item_node).node_and_node;
return switch (src_loc.lazy) {
.switch_case_item => tree.nodeToSpan(item_node),
.switch_case_item_range_first => tree.nodeToSpan(first),
.switch_case_item_range_last => tree.nodeToSpan(last),
else => unreachable,
};
} else unreachable;
},
}
},
.func_decl_param_comptime => |param_idx| {
const tree = try src_loc.file_scope.getTree(zcu);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, src_loc.base_node).?;
var param_it = full.iterate(tree);
for (0..param_idx) |_| assert(param_it.next() != null);
const param = param_it.next().?;
return tree.tokenToSpan(param.comptime_noalias.?);
},
.func_decl_param_ty => |param_idx| {
const tree = try src_loc.file_scope.getTree(zcu);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, src_loc.base_node).?;
var param_it = full.iterate(tree);
for (0..param_idx) |_| assert(param_it.next() != null);
const param = param_it.next().?;
if (param.anytype_ellipsis3) |tok| {
return tree.tokenToSpan(tok);
} else {
return tree.nodeToSpan(param.type_expr.?);
}
},
}
}
};
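// Illustrative sketch (comment only, not part of the compiler): for the
// source `const x = foo.bar;`, the `node_offset_field_name` case above
// yields a `Span` covering just the `bar` token, roughly:
//
//   Span{ .start = 14, .end = 17, .main = 14 }
//
// where `start`/`end` are byte offsets into the file and `main` marks where
// a diagnostic caret should point.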
pub const LazySrcLoc = struct {
/// This instruction provides the source node that locations are resolved relative to.
/// It is a `declaration`, `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl`.
/// This must be valid even if `relative` is an absolute value, since it is required to
/// determine the file which the `LazySrcLoc` refers to.
base_node_inst: InternPool.TrackedInst.Index,
/// This field determines the source location relative to `base_node_inst`.
offset: Offset,
pub const Offset = union(enum) {
/// When this tag is set, the code that constructed this `LazySrcLoc` is asserting
/// that all code paths which would need to resolve the source location are
/// unreachable. If you are debugging a case where this tag was set incorrectly,
/// consider using reverse-continue with a memory watchpoint to see where the
/// value is being set to this tag.
/// `base_node_inst` is unused.
unneeded,
/// The source location points to a byte offset within a source file,
/// offset from 0. The source file is determined contextually.
byte_abs: u32,
/// The source location points to a token within a source file,
/// offset from 0. The source file is determined contextually.
token_abs: Ast.TokenIndex,
/// The source location points to an AST node within a source file,
/// offset from 0. The source file is determined contextually.
node_abs: Ast.Node.Index,
/// The source location points to a byte offset within a source file,
/// offset from the byte offset of the base node within the file.
byte_offset: u32,
/// This data is the offset into the token list from the base node's first token.
token_offset: Ast.TokenOffset,
/// The source location points to an AST node, which is this value offset
/// from its containing base node AST index.
node_offset: TracedOffset,
/// The source location points to the main token of an AST node, found
/// by taking this AST node index offset from the containing base node.
node_offset_main_token: Ast.Node.Offset,
/// The source location points to the beginning of a struct initializer.
node_offset_initializer: Ast.Node.Offset,
/// The source location points to a variable declaration type expression,
/// found by taking this AST node index offset from the containing
/// base node, which points to a variable declaration AST node. Next, navigate
/// to the type expression.
node_offset_var_decl_ty: Ast.Node.Offset,
/// The source location points to the alignment expression of a var decl.
node_offset_var_decl_align: Ast.Node.Offset,
/// The source location points to the linksection expression of a var decl.
node_offset_var_decl_section: Ast.Node.Offset,
/// The source location points to the addrspace expression of a var decl.
node_offset_var_decl_addrspace: Ast.Node.Offset,
/// The source location points to the initializer of a var decl.
node_offset_var_decl_init: Ast.Node.Offset,
/// The source location points to the given argument of a builtin function call.
/// `builtin_call_node` points to the builtin call.
/// `arg_index` is the index of the argument which the source location refers to.
node_offset_builtin_call_arg: struct {
builtin_call_node: Ast.Node.Offset,
arg_index: u32,
},
/// Like `node_offset_builtin_call_arg` but recurses through arbitrarily many calls
/// to pointer cast builtins (taking the first argument of the most nested).
node_offset_ptrcast_operand: Ast.Node.Offset,
/// The source location points to the index expression of an array access
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to an array access AST node. Next, navigate
/// to the index expression.
node_offset_array_access_index: Ast.Node.Offset,
/// The source location points to the LHS of a slice
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a slice AST node. Next, navigate
/// to the pointer expression.
node_offset_slice_ptr: Ast.Node.Offset,
/// The source location points to the start expression of a slice
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a slice AST node. Next, navigate
/// to the start expression.
node_offset_slice_start: Ast.Node.Offset,
/// The source location points to the end expression of a slice
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a slice AST node. Next, navigate
/// to the end expression.
node_offset_slice_end: Ast.Node.Offset,
/// The source location points to the sentinel expression of a slice
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a slice AST node. Next, navigate
/// to the sentinel expression.
node_offset_slice_sentinel: Ast.Node.Offset,
/// The source location points to the callee expression of a function
/// call expression, found by taking this AST node index offset from the containing
/// base node, which points to a function call AST node. Next, navigate
/// to the callee expression.
node_offset_call_func: Ast.Node.Offset,
/// The payload is offset from the containing base node.
/// The source location points to the field name of:
/// * a field access expression (`a.b`), or
/// * the callee of a method call (`a.b()`)
node_offset_field_name: Ast.Node.Offset,
/// The payload is offset from the containing base node.
/// The source location points to the field name (`a`) of a field
/// initialization expression (`.a = b`), given the operand (`b`) node.
node_offset_field_name_init: Ast.Node.Offset,
/// The source location points to the pointer of a pointer deref expression,
/// found by taking this AST node index offset from the containing
/// base node, which points to a pointer deref AST node. Next, navigate
/// to the pointer expression.
node_offset_deref_ptr: Ast.Node.Offset,
/// The source location points to the assembly source code of an inline assembly
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to inline assembly AST node. Next, navigate
/// to the asm template source code.
node_offset_asm_source: Ast.Node.Offset,
/// The source location points to the return type of an inline assembly
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to inline assembly AST node. Next, navigate
/// to the return type expression.
node_offset_asm_ret_ty: Ast.Node.Offset,
/// The source location points to the condition expression of an if
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to an if expression AST node. Next, navigate
/// to the condition expression.
node_offset_if_cond: Ast.Node.Offset,
/// The source location points to a binary expression, such as `a + b`, found
/// by taking this AST node index offset from the containing base node.
node_offset_bin_op: Ast.Node.Offset,
/// The source location points to the LHS of a binary expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a binary expression AST node. Next, navigate to the LHS.
node_offset_bin_lhs: Ast.Node.Offset,
/// The source location points to the RHS of a binary expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a binary expression AST node. Next, navigate to the RHS.
node_offset_bin_rhs: Ast.Node.Offset,
/// The source location points to the operand of a try expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a try expression AST node. Next, navigate to the
/// operand expression.
node_offset_try_operand: Ast.Node.Offset,
/// The source location points to the operand of a switch expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a switch expression AST node. Next, navigate to the operand.
node_offset_switch_operand: Ast.Node.Offset,
/// The source location points to the else prong of a switch expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a switch expression AST node. Next, navigate to the else prong.
node_offset_switch_else_prong: Ast.Node.Offset,
/// The source location points to all the ranges of a switch expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a switch expression AST node. Next, navigate to any of the
/// range nodes. The error applies to all of them.
node_offset_switch_range: Ast.Node.Offset,
/// The source location points to the align expr of a function type
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a function type AST node. Next, navigate to
/// the align expression node.
node_offset_fn_type_align: Ast.Node.Offset,
/// The source location points to the addrspace expr of a function type
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a function type AST node. Next, navigate to
/// the addrspace expression node.
node_offset_fn_type_addrspace: Ast.Node.Offset,
/// The source location points to the linksection expr of a function type
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a function type AST node. Next, navigate to
/// the linksection expression node.
node_offset_fn_type_section: Ast.Node.Offset,
/// The source location points to the calling convention of a function type
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a function type AST node. Next, navigate to
/// the calling convention node.
node_offset_fn_type_cc: Ast.Node.Offset,
/// The source location points to the return type of a function type
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a function type AST node. Next, navigate to
/// the return type node.
node_offset_fn_type_ret_ty: Ast.Node.Offset,
/// The source location points to a function parameter, found by taking this
/// AST node index offset from the containing base node and widening the span
/// backwards over the parameter name and any `comptime`/`noalias` keywords.
node_offset_param: Ast.Node.Offset,
/// Like `node_offset_param`, but the parameter is found by taking this token
/// offset from the base node's main token.
token_offset_param: Ast.TokenOffset,
/// The source location points to the type expression of an `anyframe->T`
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to a `anyframe->T` expression AST node. Next, navigate
/// to the type expression.
node_offset_anyframe_type: Ast.Node.Offset,
/// The source location points to the string literal of `extern "foo"`, found
/// by taking this AST node index offset from the containing
/// base node, which points to a function prototype or variable declaration
/// expression AST node. Next, navigate to the string literal of the `extern "foo"`.
node_offset_lib_name: Ast.Node.Offset,
/// The source location points to the len expression of an `[N:S]T`
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to an `[N:S]T` expression AST node. Next, navigate
/// to the len expression.
node_offset_array_type_len: Ast.Node.Offset,
/// The source location points to the sentinel expression of an `[N:S]T`
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to an `[N:S]T` expression AST node. Next, navigate
/// to the sentinel expression.
node_offset_array_type_sentinel: Ast.Node.Offset,
/// The source location points to the elem expression of an `[N:S]T`
/// expression, found by taking this AST node index offset from the containing
/// base node, which points to an `[N:S]T` expression AST node. Next, navigate
/// to the elem expression.
node_offset_array_type_elem: Ast.Node.Offset,
/// The source location points to the operand of a unary expression.
node_offset_un_op: Ast.Node.Offset,
/// The source location points to the elem type of a pointer.
node_offset_ptr_elem: Ast.Node.Offset,
/// The source location points to the sentinel of a pointer.
node_offset_ptr_sentinel: Ast.Node.Offset,
/// The source location points to the align expr of a pointer.
node_offset_ptr_align: Ast.Node.Offset,
/// The source location points to the addrspace expr of a pointer.
node_offset_ptr_addrspace: Ast.Node.Offset,
/// The source location points to the bit-offset of a pointer.
node_offset_ptr_bitoffset: Ast.Node.Offset,
/// The source location points to the host size of a pointer.
node_offset_ptr_hostsize: Ast.Node.Offset,
/// The source location points to the type of an array or struct initializer.
node_offset_init_ty: Ast.Node.Offset,
/// The source location points to the LHS of an assignment (or assign-op, e.g. `+=`).
node_offset_store_ptr: Ast.Node.Offset,
/// The source location points to the RHS of an assignment (or assign-op, e.g. `+=`).
node_offset_store_operand: Ast.Node.Offset,
/// The source location points to the operand of a `return` statement, or
/// the `return` itself if there is no explicit operand.
node_offset_return_operand: Ast.Node.Offset,
/// The source location points to an assembly input
asm_input: struct {
/// Points to the assembly node
offset: Ast.Node.Offset,
input_index: u32,
},
/// The source location points to an assembly output
asm_output: struct {
/// Points to the assembly node
offset: Ast.Node.Offset,
output_index: u32,
},
/// Points to the assembly node
asm_clobbers: Ast.Node.Offset,
/// The source location points to a for loop input.
for_input: struct {
/// Points to the for loop AST node.
for_node_offset: Ast.Node.Offset,
/// Picks one of the inputs from the condition.
input_index: u32,
},
/// The source location points to one of the captures of a for loop, found
/// by taking this AST node index offset from the containing
/// base node, which points to one of the input nodes of a for loop.
/// Next, navigate to the corresponding capture.
for_capture_from_input: Ast.Node.Offset,
/// The source location points to the argument node of a function call.
call_arg: struct {
/// Points to the function call AST node.
call_node_offset: Ast.Node.Offset,
/// The index of the argument the source location points to.
arg_index: u32,
},
fn_proto_param: FnProtoParam,
fn_proto_param_type: FnProtoParam,
array_cat_lhs: ArrayCat,
array_cat_rhs: ArrayCat,
/// The source location points to the backing or tag type expression of
/// the container type declaration at the base node.
///
/// For 'union(enum(T))', this points to 'T', not 'enum(T)'.
container_arg,
/// The source location points to the name of the field at the given index
/// of the container type declaration at the base node.
container_field_name: u32,
/// Like `container_field_name`, but points at the field's default value.
container_field_value: u32,
/// Like `container_field_name`, but points at the field's type.
container_field_type: u32,
/// Like `container_field_name`, but points at the field's alignment.
container_field_align: u32,
/// The source location points to the type of the field at the given index
/// of the tuple type declaration at `tuple_decl_node_offset`.
tuple_field_type: TupleField,
/// The source location points to the default init of the field at the given index
/// of the tuple type declaration at `tuple_decl_node_offset`.
tuple_field_init: TupleField,
/// The source location points to the given element/field of a struct or
/// array initialization expression.
init_elem: struct {
/// Points to the AST node of the initialization expression.
init_node_offset: Ast.Node.Offset,
/// The index of the field/element the source location points to.
elem_index: u32,
},
// The following source locations are like `init_elem`, but refer to a
// field with a specific name. If such a field is not given, the entire
// initialization expression is used instead.
// The `Ast.Node.Offset` points to the AST node of a builtin call, whose *second*
// argument is the init expression.
init_field_name: Ast.Node.Offset,
init_field_linkage: Ast.Node.Offset,
init_field_section: Ast.Node.Offset,
init_field_visibility: Ast.Node.Offset,
init_field_rw: Ast.Node.Offset,
init_field_locality: Ast.Node.Offset,
init_field_cache: Ast.Node.Offset,
init_field_library: Ast.Node.Offset,
init_field_thread_local: Ast.Node.Offset,
init_field_dll_import: Ast.Node.Offset,
init_field_relocation: Ast.Node.Offset,
init_field_decoration: Ast.Node.Offset,
/// The source location points to the value of an item in a specific
/// case of a `switch`.
switch_case_item: SwitchItem,
/// The source location points to the "first" value of a range item in
/// a specific case of a `switch`.
switch_case_item_range_first: SwitchItem,
/// The source location points to the "last" value of a range item in
/// a specific case of a `switch`.
switch_case_item_range_last: SwitchItem,
/// The source location points to the main capture of a specific case of
/// a `switch`.
switch_capture: SwitchCapture,
/// The source location points to the "tag" capture (second capture) of
/// a specific case of a `switch`.
switch_tag_capture: SwitchCapture,
/// The source location points to the `comptime` token on the given comptime parameter,
/// where the base node is a function declaration. The value is the parameter index.
func_decl_param_comptime: u32,
/// The source location points to the type annotation on the given function parameter,
/// where the base node is a function declaration. The value is the parameter index.
func_decl_param_ty: u32,
pub const FnProtoParam = struct {
/// The offset of the function prototype AST node.
fn_proto_node_offset: Ast.Node.Offset,
/// The index of the parameter the source location points to.
param_index: u32,
};
pub const SwitchItem = struct {
/// The offset of the switch AST node.
switch_node_offset: Ast.Node.Offset,
/// The index of the case to point to within this switch.
case_idx: Zir.UnwrappedSwitchBlock.Case.Index,
/// The index of the item to point to within this case.
item_idx: SwitchItem.Index,
pub const Index = packed struct(u32) {
kind: enum(u1) { single, range },
value: u31,
};
};
pub const SwitchCapture = struct {
/// The offset of the switch AST node.
switch_node_offset: Ast.Node.Offset,
/// The index of the case whose capture to point to.
case_idx: Zir.UnwrappedSwitchBlock.Case.Index,
};
pub const ArrayCat = struct {
/// Points to the array concat AST node.
array_cat_offset: Ast.Node.Offset,
/// The index of the element the source location points to.
elem_index: u32,
};
pub const TupleField = struct {
/// Points to the AST node of the tuple type declaration.
tuple_decl_node_offset: Ast.Node.Offset,
/// The index of the tuple field the source location points to.
elem_index: u32,
};
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
noinline fn nodeOffsetDebug(node_offset: Ast.Node.Offset) Offset {
var result: Offset = .{ .node_offset = .{ .x = node_offset } };
result.node_offset.trace.addAddr(@returnAddress(), "init");
return result;
}
fn nodeOffsetRelease(node_offset: Ast.Node.Offset) Offset {
return .{ .node_offset = .{ .x = node_offset } };
}
/// This wraps a simple integer in debug builds so that later on we can find out
/// where in semantic analysis the value got set.
pub const TracedOffset = struct {
x: Ast.Node.Offset,
trace: std.debug.Trace = std.debug.Trace.init,
const want_tracing = false;
};
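// Illustrative note (comment only): with `want_tracing` flipped to `true`,
// `nodeOffset` routes through `nodeOffsetDebug`, and every constructed
// `node_offset` records a `std.debug.Trace` of its initialization site.
// A hypothetical debugging session could then dump that trace:
//
//   offset.node_offset.trace.dump();
//
// to find where in semantic analysis the offset was created.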
};
pub const unneeded: LazySrcLoc = .{
.base_node_inst = undefined,
.offset = .unneeded,
};
/// Returns `null` if the ZIR instruction has been lost across incremental updates.
pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } {
comptime assert(Zir.inst_tracking_version == 0);
const ip = &zcu.intern_pool;
const file_index, const zir_inst = inst: {
const info = base_node_inst.resolveFull(ip) orelse return null;
break :inst .{ info.file, info.inst };
};
const file = zcu.fileByIndex(file_index);
// If we're relative to .main_struct_inst, we know the AST node is the root and don't need to resolve the ZIR,
// which may not exist e.g. in the case of errors in ZON files.
if (zir_inst == .main_struct_inst) return .{ file, .root };
// Otherwise, make sure ZIR is loaded.
const zir = file.zir.?;
const inst = zir.instructions.get(@intFromEnum(zir_inst));
const base_node: Ast.Node.Index = switch (inst.tag) {
.declaration => inst.data.declaration.src_node,
.struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_node,
.struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_node,
.extended => switch (inst.data.extended.opcode) {
.struct_decl => zir.getStructDecl(zir_inst).src_node,
.union_decl => zir.getUnionDecl(zir_inst).src_node,
.enum_decl => zir.getEnumDecl(zir_inst).src_node,
.opaque_decl => zir.getOpaqueDecl(zir_inst).src_node,
.reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.node,
.reify_struct => zir.extraData(Zir.Inst.ReifyStruct, inst.data.extended.operand).data.node,
.reify_union => zir.extraData(Zir.Inst.ReifyUnion, inst.data.extended.operand).data.node,
else => unreachable,
},
else => unreachable,
};
return .{ file, base_node };
}
/// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`.
/// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates.
pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc {
return lazy.upgradeOrLost(zcu).?;
}
/// Like `upgrade`, but returns `null` if the source location has been lost across incremental updates.
pub fn upgradeOrLost(lazy: LazySrcLoc, zcu: *Zcu) ?SrcLoc {
const file, const base_node: Ast.Node.Index = resolveBaseNode(lazy.base_node_inst, zcu) orelse return null;
return .{
.file_scope = file,
.base_node = base_node,
.lazy = lazy.offset,
};
}
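// Usage sketch (comment only, assuming a live `zcu` and a `lazy` value whose
// tracked instruction still exists):
//
//   const src: SrcLoc = lazy.upgrade(zcu);
//   const span = try src.span(zcu);
//
// Across incremental updates, prefer `upgradeOrLost` and handle the `null`
// case, since the tracked instruction may have been removed.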
pub fn order(lhs: LazySrcLoc, rhs: LazySrcLoc, zcu: *Zcu) std.math.Order {
const lhs_resolved = lhs.upgradeOrLost(zcu) orelse {
// LHS source location lost, so should never be referenced. Just sort it to the end.
return .gt;
};
const rhs_resolved = rhs.upgradeOrLost(zcu) orelse {
// RHS source location lost, so should never be referenced. Just sort it to the end.
return .lt;
};
if (lhs_resolved.file_scope != rhs_resolved.file_scope) {
const lhs_path = lhs_resolved.file_scope.path;
const rhs_path = rhs_resolved.file_scope.path;
return std.math.order(@intFromEnum(lhs_path.root), @intFromEnum(rhs_path.root)).differ() orelse
std.mem.order(u8, lhs_path.sub_path, rhs_path.sub_path).differ().?;
}
const prev_prot = zcu.comp.io.swapCancelProtection(.blocked);
defer _ = zcu.comp.io.swapCancelProtection(prev_prot);
const lhs_span = lhs_resolved.span(zcu) catch |err| {
assert(err != error.Canceled); // we're protected
// Failed to read LHS, so we'll get a transient error. Just sort it to the end.
return .gt;
};
const rhs_span = rhs_resolved.span(zcu) catch |err| {
assert(err != error.Canceled); // we're protected
// Failed to read RHS, so we'll get a transient error. Just sort it to the end.
return .lt;
};
return std.math.order(lhs_span.main, rhs_span.main);
}
};
pub const SemaError = error{ OutOfMemory, Canceled, AnalysisFail };
pub const CompileError = error{
OutOfMemory,
/// The compilation update is no longer desired.
Canceled,
/// When this is returned, the compile error for the failure has already been recorded.
AnalysisFail,
/// In a comptime scope, a return instruction was encountered. This error is only seen when
/// doing a comptime function call.
ComptimeReturn,
/// In a comptime scope, a break instruction was encountered. This error is only seen when
/// evaluating a comptime block.
ComptimeBreak,
};
pub fn init(zcu: *Zcu, gpa: Allocator, io: Io, thread_count: usize) !void {
try zcu.intern_pool.init(gpa, io, thread_count);
}
pub fn deinit(zcu: *Zcu) void {
const comp = zcu.comp;
const io = comp.io;
const gpa = zcu.gpa;
{
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
if (zcu.llvm_object) |llvm_object| llvm_object.deinit();
zcu.builtin_modules.deinit(gpa);
zcu.module_roots.deinit(gpa);
for (zcu.import_table.keys()) |file_index| {
pt.destroyFile(file_index);
}
zcu.import_table.deinit(gpa);
zcu.alive_files.deinit(gpa);
for (zcu.embed_table.keys()) |embed_file| {
embed_file.path.deinit(gpa);
gpa.destroy(embed_file);
}
zcu.embed_table.deinit(gpa);
zcu.local_zir_cache.handle.close(io);
zcu.global_zir_cache.handle.close(io);
for (zcu.failed_analysis.values()) |value| value.destroy(gpa);
for (zcu.failed_codegen.values()) |value| value.destroy(gpa);
for (zcu.failed_types.values()) |value| value.destroy(gpa);
zcu.analysis_in_progress.deinit(gpa);
zcu.failed_analysis.deinit(gpa);
zcu.transitive_failed_analysis.deinit(gpa);
zcu.dependency_loops.deinit(gpa);
zcu.dependency_loop_nodes.deinit(gpa);
zcu.failed_codegen.deinit(gpa);
zcu.failed_types.deinit(gpa);
for (zcu.failed_files.values()) |value| {
if (value) |msg| gpa.free(msg);
}
zcu.failed_files.deinit(gpa);
zcu.failed_imports.deinit(gpa);
for (zcu.failed_exports.values()) |value| {
value.destroy(gpa);
}
zcu.failed_exports.deinit(gpa);
for (zcu.cimport_errors.values()) |*errs| {
errs.deinit(gpa);
}
zcu.cimport_errors.deinit(gpa);
zcu.compile_logs.deinit(gpa);
zcu.compile_log_lines.deinit(gpa);
zcu.free_compile_log_lines.deinit(gpa);
zcu.all_exports.deinit(gpa);
zcu.free_exports.deinit(gpa);
zcu.single_exports.deinit(gpa);
zcu.multi_exports.deinit(gpa);
zcu.potentially_outdated.deinit(gpa);
zcu.outdated.deinit(gpa);
zcu.outdated_ready.funcs.deinit(gpa);
zcu.outdated_ready.other.deinit(gpa);
zcu.retryable_failures.deinit(gpa);
zcu.test_functions.deinit(gpa);
for (zcu.global_assembly.values()) |s| {
gpa.free(s);
}
zcu.global_assembly.deinit(gpa);
zcu.reference_table.deinit(gpa);
zcu.all_references.deinit(gpa);
zcu.free_references.deinit(gpa);
zcu.inline_reference_frames.deinit(gpa);
zcu.free_inline_reference_frames.deinit(gpa);
zcu.type_reference_table.deinit(gpa);
zcu.all_type_references.deinit(gpa);
zcu.free_type_references.deinit(gpa);
if (zcu.resolved_references) |*r| r.deinit(gpa);
if (comp.debugIncremental()) {
zcu.incremental_debug_state.deinit(gpa);
}
}
zcu.intern_pool.deinit(gpa, io);
}
pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace {
return zcu.intern_pool.namespacePtr(index);
}
pub fn namespacePtrUnwrap(zcu: *Zcu, index: Namespace.OptionalIndex) ?*Namespace {
return zcu.namespacePtr(index.unwrap() orelse return null);
}
// TODO https://github.com/ziglang/zig/issues/8643
pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
pub const HackDataLayout = extern struct {
data: [8]u8 align(@alignOf(Zir.Inst.Data)),
safety_tag: u8,
};
comptime {
if (data_has_safety_tag) {
assert(@sizeOf(HackDataLayout) == @sizeOf(Zir.Inst.Data));
}
}
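// Illustrative note (comment only): in safety builds `Zir.Inst.Data` is a
// tagged union, so each element occupies 8 payload bytes plus a safety tag:
//
//   [ 8 payload bytes ][ 1 tag byte ][ padding ]
//
// `HackDataLayout` reinterprets that memory so the cache code below can
// strip the tag on save and reattach it on load. In release builds the
// union is bare, `@sizeOf(Zir.Inst.Data) == 8`, and no translation happens.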
pub fn loadZirCache(gpa: Allocator, io: Io, cache_file: Io.File) !Zir {
var buffer: [2000]u8 = undefined;
var file_reader = cache_file.reader(io, &buffer);
return result: {
const header = file_reader.interface.takeStructPointer(Zir.Header) catch |err| break :result err;
break :result loadZirCacheBody(gpa, header.*, &file_reader.interface);
} catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
else => |e| return e,
};
}
pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader) !Zir {
var instructions: std.MultiArrayList(Zir.Inst) = .{};
errdefer instructions.deinit(gpa);
try instructions.setCapacity(gpa, header.instructions_len);
instructions.len = header.instructions_len;
var zir: Zir = .{
.instructions = instructions.toOwnedSlice(),
.string_bytes = &.{},
.extra = &.{},
};
errdefer zir.deinit(gpa);
zir.string_bytes = try gpa.alloc(u8, header.string_bytes_len);
zir.extra = try gpa.alloc(u32, header.extra_len);
const safety_buffer = if (data_has_safety_tag)
try gpa.alloc([8]u8, header.instructions_len)
else
undefined;
defer if (data_has_safety_tag) gpa.free(safety_buffer);
var vecs = [_][]u8{
@ptrCast(zir.instructions.items(.tag)),
if (data_has_safety_tag)
@ptrCast(safety_buffer)
else
@ptrCast(zir.instructions.items(.data)),
zir.string_bytes,
@ptrCast(zir.extra),
};
try cache_br.readVecAll(&vecs);
if (data_has_safety_tag) {
const tags = zir.instructions.items(.tag);
for (zir.instructions.items(.data), 0..) |*data, i| {
const union_tag = Zir.Inst.Tag.data_tags[@intFromEnum(tags[i])];
const as_struct = @as(*HackDataLayout, @ptrCast(data));
as_struct.* = .{
.safety_tag = @intFromEnum(union_tag),
.data = safety_buffer[i],
};
}
}
return zir;
}
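// On-disk layout sketch (comment only), mirroring `saveZirCache` below:
//
//   Zir.Header
//   instructions.items(.tag)   one Zir.Inst.Tag per instruction
//   instructions.items(.data)  8 bytes per instruction (safety tag stripped)
//   string_bytes               raw bytes
//   extra                      array of u32
//
// `loadZirCacheBody` reads exactly these vectors back with `readVecAll`.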
pub fn saveZirCache(
gpa: Allocator,
cache_file_writer: *Io.File.Writer,
stat: Io.File.Stat,
zir: Zir,
) (Io.File.Writer.Error || Allocator.Error)!void {
const safety_buffer = if (data_has_safety_tag)
try gpa.alloc([8]u8, zir.instructions.len)
else
undefined;
defer if (data_has_safety_tag) gpa.free(safety_buffer);
if (data_has_safety_tag) {
// The `Data` union has a safety tag but in the file format we store it without.
for (zir.instructions.items(.data), 0..) |*data, i| {
const as_struct: *const HackDataLayout = @ptrCast(data);
safety_buffer[i] = as_struct.data;
}
}
const header: Zir.Header = .{
.instructions_len = @intCast(zir.instructions.len),
.string_bytes_len = @intCast(zir.string_bytes.len),
.extra_len = @intCast(zir.extra.len),
.stat_size = stat.size,
.stat_inode = stat.inode,
.stat_mtime = stat.mtime.toNanoseconds(),
};
var vecs = [_][]const u8{
@ptrCast((&header)[0..1]),
@ptrCast(zir.instructions.items(.tag)),
if (data_has_safety_tag)
@ptrCast(safety_buffer)
else
@ptrCast(zir.instructions.items(.data)),
zir.string_bytes,
@ptrCast(zir.extra),
};
cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => return cache_file_writer.err.?,
};
}
pub fn saveZoirCache(cache_file_writer: *Io.File.Writer, stat: Io.File.Stat, zoir: Zoir) Io.File.Writer.Error!void {
const header: Zoir.Header = .{
.nodes_len = @intCast(zoir.nodes.len),
.extra_len = @intCast(zoir.extra.len),
.limbs_len = @intCast(zoir.limbs.len),
.string_bytes_len = @intCast(zoir.string_bytes.len),
.compile_errors_len = @intCast(zoir.compile_errors.len),
.error_notes_len = @intCast(zoir.error_notes.len),
.stat_size = stat.size,
.stat_inode = stat.inode,
.stat_mtime = stat.mtime.toNanoseconds(),
};
var vecs = [_][]const u8{
@ptrCast((&header)[0..1]),
@ptrCast(zoir.nodes.items(.tag)),
@ptrCast(zoir.nodes.items(.data)),
@ptrCast(zoir.nodes.items(.ast_node)),
@ptrCast(zoir.extra),
@ptrCast(zoir.limbs),
zoir.string_bytes,
@ptrCast(zoir.compile_errors),
@ptrCast(zoir.error_notes),
};
cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => return cache_file_writer.err.?,
};
}
pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *Io.Reader) !Zoir {
var zoir: Zoir = .{
.nodes = .empty,
.extra = &.{},
.limbs = &.{},
.string_bytes = &.{},
.compile_errors = &.{},
.error_notes = &.{},
};
errdefer zoir.deinit(gpa);
zoir.nodes = nodes: {
var nodes: std.MultiArrayList(Zoir.Node.Repr) = .empty;
defer nodes.deinit(gpa);
try nodes.setCapacity(gpa, header.nodes_len);
nodes.len = header.nodes_len;
break :nodes nodes.toOwnedSlice();
};
zoir.extra = try gpa.alloc(u32, header.extra_len);
zoir.limbs = try gpa.alloc(std.math.big.Limb, header.limbs_len);
zoir.string_bytes = try gpa.alloc(u8, header.string_bytes_len);
zoir.compile_errors = try gpa.alloc(Zoir.CompileError, header.compile_errors_len);
zoir.error_notes = try gpa.alloc(Zoir.CompileError.Note, header.error_notes_len);
var vecs = [_][]u8{
@ptrCast(zoir.nodes.items(.tag)),
@ptrCast(zoir.nodes.items(.data)),
@ptrCast(zoir.nodes.items(.ast_node)),
@ptrCast(zoir.extra),
@ptrCast(zoir.limbs),
zoir.string_bytes,
@ptrCast(zoir.compile_errors),
@ptrCast(zoir.error_notes),
};
try cache_br.readVecAll(&vecs);
return zoir;
}
pub fn markDependeeOutdated(
zcu: *Zcu,
/// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependers as PO.
/// However, when we discover during analysis that something was outdated, the dependers of the `Dependee`
/// were already marked as PO, so we need to decrement the PO dep count for each of them.
marked_po: enum { not_marked_po, marked_po },
dependee: InternPool.Dependee,
) !void {
const gpa = zcu.comp.gpa;
deps_log.debug("outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(zcu.comp.io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(zcu.comp.io);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
switch (marked_po) {
.not_marked_po => {},
.marked_po => {
po_dep_count.* -= 1;
deps_log.debug("outdated {f} => already outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
deps_log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
switch (depender.unwrap()) {
.func => |func| try zcu.outdated_ready.funcs.put(gpa, func, {}),
else => try zcu.outdated_ready.other.put(gpa, depender, {}),
}
}
},
}
continue;
}
const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender);
const new_po_dep_count = switch (marked_po) {
.not_marked_po => if (opt_po_entry) |e| e.value else 0,
.marked_po => if (opt_po_entry) |e| e.value - 1 else {
// This `AnalUnit` has already been re-analyzed this update, and registered a dependency
// on this thing, but already has sufficiently up-to-date information. Nothing to do.
continue;
},
};
try zcu.outdated.putNoClobber(
gpa,
depender,
new_po_dep_count,
);
deps_log.debug("outdated {f} => new outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
if (new_po_dep_count == 0) {
deps_log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
switch (depender.unwrap()) {
.func => |func| try zcu.outdated_ready.funcs.put(gpa, func, {}),
else => try zcu.outdated_ready.other.put(gpa, depender, {}),
}
}
// If this is a Decl and was not previously PO, we must recursively
// mark dependencies on its tyval as PO.
if (opt_po_entry == null) {
assert(marked_po == .not_marked_po);
try zcu.markTransitiveDependersPotentiallyOutdated(depender);
}
}
}
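// Worked example (comment only, hypothetical units): suppose unit `A`
// depends on dependee `D`, and unit `B` depends on `A`'s resolved value.
// When `D` changes during a ZIR diff:
//
//   markDependeeOutdated(.not_marked_po, D)
//     => `A` enters `outdated` with po_deps=0 and is queued in
//        `outdated_ready`;
//     => markTransitiveDependersPotentiallyOutdated(A) puts `B` in
//        `potentially_outdated` with po_deps=1.
//
// If re-analysis of `A` produces an unchanged value, `markPoDependeeUpToDate`
// later drops `B`'s PO count to zero without re-analyzing it.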
pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(zcu.comp.io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(zcu.comp.io);
return markPoDependeeUpToDateInner(zcu, dependee);
}
/// Assumes that `zcu.outdated_lock` is already held exclusively.
fn markPoDependeeUpToDateInner(zcu: *Zcu, dependee: InternPool.Dependee) !void {
const gpa = zcu.comp.gpa;
deps_log.debug("up-to-date dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
// This depender is already outdated, but it now has one
// less PO dependency!
po_dep_count.* -= 1;
deps_log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
deps_log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
switch (depender.unwrap()) {
.func => |func| try zcu.outdated_ready.funcs.put(gpa, func, {}),
else => try zcu.outdated_ready.other.put(gpa, depender, {}),
}
}
continue;
}
// This depender is definitely at least PO, because this Decl was just analyzed
// due to being outdated.
const ptr = zcu.potentially_outdated.getPtr(depender) orelse {
// This dependency was registered by an analysis that is still in progress, so the unit
// never entered `potentially_outdated`. Nothing to do.
continue;
};
if (ptr.* > 1) {
ptr.* -= 1;
deps_log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
continue;
}
deps_log.debug("up-to-date {f} => {f} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
// This dependency is no longer PO, i.e. is known to be up-to-date.
assert(zcu.potentially_outdated.swapRemove(depender));
// If this is a Decl, we must recursively mark dependencies on its tyval
// as no longer PO.
switch (depender.unwrap()) {
.@"comptime" => {},
.nav_val => |nav| try zcu.markPoDependeeUpToDateInner(.{ .nav_val = nav }),
.nav_ty => |nav| try zcu.markPoDependeeUpToDateInner(.{ .nav_ty = nav }),
.type_layout => |ty| try zcu.markPoDependeeUpToDateInner(.{ .type_layout = ty }),
.struct_defaults => |ty| try zcu.markPoDependeeUpToDateInner(.{ .struct_defaults = ty }),
.func => |func| try zcu.markPoDependeeUpToDateInner(.{ .func_ies = func }),
.memoized_state => |stage| try zcu.markPoDependeeUpToDateInner(.{ .memoized_state = stage }),
}
}
}
/// Given an `AnalUnit` which is newly outdated or PO, mark all AnalUnits which may
/// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES.
///
/// Assumes that `zcu.outdated_lock` is already held exclusively.
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) Allocator.Error!void {
const gpa = zcu.comp.gpa;
const ip = &zcu.intern_pool;
const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) {
.@"comptime" => return, // analysis of a comptime decl can't outdate any dependers
.nav_val => |nav| .{ .nav_val = nav },
.nav_ty => |nav| .{ .nav_ty = nav },
.type_layout => |ty| .{ .type_layout = ty },
.struct_defaults => |ty| .{ .struct_defaults = ty },
.func => |func_index| .{ .func_ies = func_index },
.memoized_state => |stage| .{ .memoized_state = stage },
};
deps_log.debug("potentially outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = ip.dependencyIterator(dependee);
while (it.next()) |po| {
if (zcu.outdated.getPtr(po)) |po_dep_count| {
// This dependency is already outdated, but it now has one more PO dependency.
if (po_dep_count.* == 0) {
switch (po.unwrap()) {
.func => |func| _ = zcu.outdated_ready.funcs.swapRemove(func),
else => _ = zcu.outdated_ready.other.swapRemove(po),
}
}
po_dep_count.* += 1;
deps_log.debug("po {f} => {f} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
continue;
}
if (zcu.potentially_outdated.getPtr(po)) |n| {
// There is now one more PO dependency.
n.* += 1;
deps_log.debug("po {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
continue;
}
try zcu.potentially_outdated.putNoClobber(gpa, po, 1);
deps_log.debug("po {f} => {f} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
// This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
try zcu.markTransitiveDependersPotentiallyOutdated(po);
}
}
/// Selects an outdated `AnalUnit` to analyze next. Called from the main semantic analysis loop when
/// there is no work immediately queued. The unit is chosen such that it is unlikely to require any
/// recursive analysis (all of its previously-marked dependencies are already up-to-date), because
/// recursive analysis can cause over-analysis on incremental updates.
pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
// We prioritize functions, because the sooner they get analyzed, the sooner they can be sent to
// the codegen backend and linker, which are usually running in parallel (so this can increase
// parallelism).
// TODO: perhaps we should also experiment with *avoiding* functions if the codegen/link queue
// is backed up (for instance due to a very large function). That could help minimize blocking
// on the main thread in `CodegenTaskPool.start` waiting for the linker to catch up.
if (zcu.outdated_ready.funcs.count() > 0) {
const unit: AnalUnit = .wrap(.{ .func = zcu.outdated_ready.funcs.keys()[0] });
log.debug("findOutdatedToAnalyze: {f}", .{zcu.fmtAnalUnit(unit)});
return unit;
}
if (zcu.outdated_ready.other.count() > 0) {
const unit = zcu.outdated_ready.other.keys()[0];
log.debug("findOutdatedToAnalyze: {f}", .{zcu.fmtAnalUnit(unit)});
return unit;
}
// Usually, getting here means that everything is up-to-date, so there is no more work to do. We
// will see that `zcu.outdated` and `zcu.potentially_outdated` are both empty.
//
// However, if a previous update had a dependency loop compile error, there is a cycle in the
// dependency graph (which is usually acyclic), which can cause a scenario where no unit appears
// to be ready, because they're all waiting for the next in the loop to be up-to-date. In that
// case, we usually have to just bite the bullet and analyze one of them. An exception is if
// `zcu.outdated` is empty but `zcu.potentially_outdated` is non-empty: in that case, the only
// possible situation is a cycle where everything is actually up-to-date, so we can clear out
// `zcu.potentially_outdated` and we are done.
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(zcu.comp.io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(zcu.comp.io);
if (zcu.outdated.count() == 0) {
// Everything is up-to-date. There could be lingering entries in `zcu.potentially_outdated`
// from a dependency loop on a previous update.
zcu.potentially_outdated.clearRetainingCapacity();
log.debug("findOutdatedToAnalyze: all up-to-date", .{});
return null;
}
const unit = zcu.outdated.keys()[0];
log.debug("findOutdatedToAnalyze: dependency loop affecting {d} units, selected {f}", .{
zcu.outdated.count(),
zcu.fmtAnalUnit(unit),
});
return unit;
}
/// During an incremental update, before semantic analysis, call this to flush all values from
/// `retryable_failures` and mark them as outdated so they get re-analyzed.
pub fn flushRetryableFailures(zcu: *Zcu) !void {
const comp = zcu.comp;
const gpa = comp.gpa;
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(comp.io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(comp.io);
for (zcu.retryable_failures.items) |depender| {
if (zcu.outdated.contains(depender)) continue;
if (zcu.potentially_outdated.fetchSwapRemove(depender)) |kv| {
// This AnalUnit was already PO, but we now consider it outdated.
// Any transitive dependencies are already marked PO.
try zcu.outdated.put(gpa, depender, kv.value);
continue;
}
// This AnalUnit was not marked PO, but is now outdated. Mark it as
// such, then recursively mark transitive dependencies as PO.
try zcu.outdated.put(gpa, depender, 0);
try zcu.markTransitiveDependersPotentiallyOutdated(depender);
}
zcu.retryable_failures.clearRetainingCapacity();
}
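/// Builds `inst_map`, a mapping from tracked instructions in `old_zir` to their counterparts in
/// `new_zir`, used to migrate incremental compilation state across an update. Named declarations
/// and named tests are matched by name; unnamed tests and `comptime` declarations are matched by
/// source order; nested type declarations are matched pairwise by order within their parent. For
/// example (illustrative), a `const foo` in the old ZIR maps to the `const foo` in the new ZIR
/// even if other declarations were added or removed between them.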
pub fn mapOldZirToNew(
gpa: Allocator,
old_zir: Zir,
new_zir: Zir,
inst_map: *std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
) Allocator.Error!void {
// Contains ZIR indexes of namespace declaration instructions, e.g. struct_decl, union_decl, etc.
// Not `declaration`, as that does not create a namespace.
const MatchedZirDecl = struct {
old_inst: Zir.Inst.Index,
new_inst: Zir.Inst.Index,
};
var pending_matched_type_decls: std.ArrayList(MatchedZirDecl) = .empty;
defer pending_matched_type_decls.deinit(gpa);
// Used as temporary buffers for namespace declaration instructions
var old_contents: Zir.DeclContents = .init;
defer old_contents.deinit(gpa);
var new_contents: Zir.DeclContents = .init;
defer new_contents.deinit(gpa);
// Map the main struct inst to start off with.
try pending_matched_type_decls.append(gpa, .{
.old_inst = .main_struct_inst,
.new_inst = .main_struct_inst,
});
while (pending_matched_type_decls.pop()) |match_item| {
// There are some properties of type declarations which cannot change across incremental
// updates. If they have, we need to ignore this mapping. These properties are essentially
// everything passed into `InternPool.getDeclaredStructType` (likewise for unions, enums,
// and opaques).
const old_tag = old_zir.instructions.items(.data)[@intFromEnum(match_item.old_inst)].extended.opcode;
const new_tag = new_zir.instructions.items(.data)[@intFromEnum(match_item.new_inst)].extended.opcode;
if (old_tag != new_tag) continue;
switch (old_tag) {
.struct_decl => {
const old = old_zir.getStructDecl(match_item.old_inst);
const new = new_zir.getStructDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
if (old.field_names.len != new.field_names.len) continue;
if (old.layout != new.layout) continue;
const old_any_field_aligns = old.field_align_body_lens != null;
const old_any_field_defaults = old.field_default_body_lens != null;
const old_any_comptime_fields = old.field_comptime_bits != null;
const old_explicit_backing_int = old.backing_int_type_body != null;
const new_any_field_aligns = new.field_align_body_lens != null;
const new_any_field_defaults = new.field_default_body_lens != null;
const new_any_comptime_fields = new.field_comptime_bits != null;
const new_explicit_backing_int = new.backing_int_type_body != null;
if (old_any_field_aligns != new_any_field_aligns) continue;
if (old_any_field_defaults != new_any_field_defaults) continue;
if (old_any_comptime_fields != new_any_comptime_fields) continue;
if (old_explicit_backing_int != new_explicit_backing_int) continue;
},
.union_decl => {
const old = old_zir.getUnionDecl(match_item.old_inst);
const new = new_zir.getUnionDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
if (old.field_names.len != new.field_names.len) continue;
if (old.kind != new.kind) continue;
const old_any_field_aligns = old.field_align_body_lens != null;
const new_any_field_aligns = new.field_align_body_lens != null;
if (old_any_field_aligns != new_any_field_aligns) continue;
},
.enum_decl => {
const old = old_zir.getEnumDecl(match_item.old_inst);
const new = new_zir.getEnumDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
if (old.field_names.len != new.field_names.len) continue;
if (old.nonexhaustive != new.nonexhaustive) continue;
const old_explicit_tag_type = old.tag_type_body != null;
const new_explicit_tag_type = new.tag_type_body != null;
if (old_explicit_tag_type != new_explicit_tag_type) continue;
},
.opaque_decl => {
const old = old_zir.getOpaqueDecl(match_item.old_inst);
const new = new_zir.getOpaqueDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
},
else => unreachable,
}
// Match the container declaration itself
try inst_map.put(gpa, match_item.old_inst, match_item.new_inst);
{
// First, map the fields...
try old_zir.findTrackableFields(gpa, &old_contents, match_item.old_inst);
try new_zir.findTrackableFields(gpa, &new_contents, match_item.new_inst);
// This isn't a `.declaration`, so we shouldn't see a function declaration.
assert(old_contents.func_decl == null);
assert(new_contents.func_decl == null);
// We don't have any smart way of matching up these instructions, so we correlate them based on source order
// in their respective arrays.
const num_type_decls = @min(old_contents.type_decls.items.len, new_contents.type_decls.items.len);
try pending_matched_type_decls.ensureUnusedCapacity(gpa, @intCast(num_type_decls));
for (
old_contents.type_decls.items[0..num_type_decls],
new_contents.type_decls.items[0..num_type_decls],
) |old_inst, new_inst| {
pending_matched_type_decls.appendAssumeCapacity(.{ .old_inst = old_inst, .new_inst = new_inst });
}
const num_other = @min(old_contents.other.items.len, new_contents.other.items.len);
try inst_map.ensureUnusedCapacity(gpa, @intCast(num_other));
for (
old_contents.other.items[0..num_other],
new_contents.other.items[0..num_other],
) |old_inst, new_inst| {
// These instructions don't have declarations, so we just modify `inst_map` directly.
inst_map.putAssumeCapacity(old_inst, new_inst);
}
}
// Maps decl name to `declaration` instruction.
var named_decls: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
defer named_decls.deinit(gpa);
// Maps test name to `declaration` instruction.
var named_tests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
defer named_tests.deinit(gpa);
// Maps decltest name to `declaration` instruction.
var named_decltests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
defer named_decltests.deinit(gpa);
// All unnamed tests, in order, for a best-effort match.
var unnamed_tests: std.ArrayList(Zir.Inst.Index) = .empty;
defer unnamed_tests.deinit(gpa);
// All comptime declarations, in order, for a best-effort match.
var comptime_decls: std.ArrayList(Zir.Inst.Index) = .empty;
defer comptime_decls.deinit(gpa);
for (old_zir.typeDecls(match_item.old_inst)) |old_decl_inst| {
const old_decl = old_zir.getDeclaration(old_decl_inst);
switch (old_decl.kind) {
.@"comptime" => try comptime_decls.append(gpa, old_decl_inst),
.unnamed_test => try unnamed_tests.append(gpa, old_decl_inst),
.@"test" => try named_tests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
.decltest => try named_decltests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
.@"const", .@"var" => try named_decls.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
}
}
var unnamed_test_idx: u32 = 0;
var comptime_decl_idx: u32 = 0;
for (new_zir.typeDecls(match_item.new_inst)) |new_decl_inst| {
const new_decl = new_zir.getDeclaration(new_decl_inst);
// Attempt to match this to a declaration in the old ZIR:
// * For named declarations (`const`/`var`/`fn`), we match based on name.
// * For named tests (`test "foo"`) and decltests (`test foo`), we also match based on name.
// * For unnamed tests, we match based on order.
// * For comptime blocks, we match based on order.
// If we cannot match this declaration, we can't match anything nested inside of it either, so we just `continue`.
const old_decl_inst = switch (new_decl.kind) {
.@"comptime" => inst: {
if (comptime_decl_idx == comptime_decls.items.len) continue;
defer comptime_decl_idx += 1;
break :inst comptime_decls.items[comptime_decl_idx];
},
.unnamed_test => inst: {
if (unnamed_test_idx == unnamed_tests.items.len) continue;
defer unnamed_test_idx += 1;
break :inst unnamed_tests.items[unnamed_test_idx];
},
.@"test" => inst: {
const name = new_zir.nullTerminatedString(new_decl.name);
break :inst named_tests.get(name) orelse continue;
},
.decltest => inst: {
const name = new_zir.nullTerminatedString(new_decl.name);
break :inst named_decltests.get(name) orelse continue;
},
.@"const", .@"var" => inst: {
const name = new_zir.nullTerminatedString(new_decl.name);
break :inst named_decls.get(name) orelse continue;
},
};
// Match the `declaration` instruction
try inst_map.put(gpa, old_decl_inst, new_decl_inst);
// Find trackable instructions within this declaration
try old_zir.findTrackable(gpa, &old_contents, old_decl_inst);
try new_zir.findTrackable(gpa, &new_contents, new_decl_inst);
// We don't have any smart way of matching up these instructions, so we correlate them based on source order
// in their respective arrays.
const num_type_decls = @min(old_contents.type_decls.items.len, new_contents.type_decls.items.len);
try pending_matched_type_decls.ensureUnusedCapacity(gpa, @intCast(num_type_decls));
for (
old_contents.type_decls.items[0..num_type_decls],
new_contents.type_decls.items[0..num_type_decls],
) |old_inst, new_inst| {
pending_matched_type_decls.appendAssumeCapacity(.{ .old_inst = old_inst, .new_inst = new_inst });
}
const num_other = @min(old_contents.other.items.len, new_contents.other.items.len);
try inst_map.ensureUnusedCapacity(gpa, @intCast(num_other));
for (
old_contents.other.items[0..num_other],
new_contents.other.items[0..num_other],
) |old_inst, new_inst| {
// These instructions don't have declarations, so we just modify `inst_map` directly.
inst_map.putAssumeCapacity(old_inst, new_inst);
}
if (old_contents.func_decl) |old_func_inst| {
if (new_contents.func_decl) |new_func_inst| {
// There are no declarations on a function either, so again, we just directly add it to `inst_map`.
try inst_map.put(gpa, old_func_inst, new_func_inst);
}
}
}
}
}
/// Ensure this function's body is or will be analyzed and emitted. This should
/// be called whenever a potential runtime call of a function is seen.
///
/// The caller is responsible for ensuring the function decl itself is already
/// analyzed, and for ensuring it can exist at runtime (see
/// `Type.fnHasRuntimeBitsSema`). This function does *not* guarantee that the body
/// will be analyzed when it returns: for that, see `PerThread.ensureFuncBodyUpToDate`.
pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func: InternPool.Index) !void {
const comp = zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
const ip = &zcu.intern_pool;
assert(func == ip.unwrapCoercedFunc(func)); // analyze the body of the original function, not a coerced one
if (ip.setWantRuntimeFnAnalysis(io, func)) {
// This is the first reference to this function, so we must ensure it will be analyzed.
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(zcu.comp.io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(zcu.comp.io);
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.funcs.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .func = func }), 0);
zcu.outdated_ready.funcs.putAssumeCapacityNoClobber(func, {});
}
}
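/// Like `ensureFuncBodyAnalysisQueued`, but for a `Nav`: queues analysis of both the `Nav`'s
/// value and its type, which is why capacity for two units is reserved below.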
pub fn ensureNavValAnalysisQueued(zcu: *Zcu, nav: InternPool.Nav.Index) !void {
const comp = zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
const ip = &zcu.intern_pool;
if (ip.setWantNavAnalysis(io, nav)) {
// This is the first reference to this `Nav`, so we must ensure it will be analyzed.
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(zcu.comp.io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(zcu.comp.io);
try zcu.outdated.ensureUnusedCapacity(gpa, 2);
try zcu.outdated_ready.other.ensureUnusedCapacity(gpa, 2);
zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .nav_val = nav }), 0);
zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .nav_ty = nav }), 0);
zcu.outdated_ready.other.putAssumeCapacityNoClobber(.wrap(.{ .nav_val = nav }), {});
zcu.outdated_ready.other.putAssumeCapacityNoClobber(.wrap(.{ .nav_ty = nav }), {});
}
}
/// Called when an `InternPool.ComptimeUnit` is first created to mark it as outdated so that it will
/// be semantically analyzed.
pub fn queueComptimeUnitAnalysis(zcu: *Zcu, cu: InternPool.ComptimeUnit.Id) Allocator.Error!void {
const comp = zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(io);
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.other.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, 0);
zcu.outdated_ready.other.putAssumeCapacityNoClobber(unit, {});
}
/// If `unit` was marked as outdated or potentially outdated, clears that status and returns `true`.
/// Otherwise, returns `false`.
pub fn clearOutdatedState(zcu: *Zcu, unit: AnalUnit) bool {
const io = zcu.comp.io;
if (std.debug.runtime_safety) zcu.outdated_lock.lockUncancelable(io);
defer if (std.debug.runtime_safety) zcu.outdated_lock.unlock(io);
if (zcu.outdated.fetchSwapRemove(unit)) |kv| {
const was_ready = switch (unit.unwrap()) {
.func => |func| zcu.outdated_ready.funcs.swapRemove(func),
else => zcu.outdated_ready.other.swapRemove(unit),
};
if (kv.value == 0) {
assert(was_ready);
} else {
assert(!was_ready);
}
return true;
} else if (zcu.potentially_outdated.swapRemove(unit)) {
return true;
} else {
return false;
}
}
/// This function takes a `*const Zcu` and `@constCast`s it so that it can be called from functions
/// in `Type` which otherwise do not modify the `Zcu`.
pub fn assertUpToDate(zcu: *const Zcu, unit: AnalUnit) void {
if (!std.debug.runtime_safety) return;
const io = zcu.comp.io;
@constCast(zcu).outdated_lock.lockSharedUncancelable(io);
defer @constCast(zcu).outdated_lock.unlockShared(io);
assert(!zcu.outdated.contains(unit));
assert(!zcu.potentially_outdated.contains(unit));
}
pub const ImportResult = struct {
/// Whether `file` has been newly created; in other words, whether this is the first import of
/// this file. This should only be `true` when importing files during AstGen. After that, all
/// files should have already been discovered.
is_new: bool,
/// `file.mod` is not populated by this function, so if `is_new` is set, `file.mod` is `undefined`.
file: *Zcu.File,
file_index: File.Index,
/// If this import was a simple file path, this is `null`; the imported file should exist within
/// the importer's module. Otherwise, it's the module which the import resolved to. This module
/// could match the module of `cur_file`, since a module can depend on itself.
module: ?*Package.Module,
};
/// Prepares `unit` for re-analysis by clearing all of the following state:
/// * Compile errors associated with `unit`
/// * Compile logs associated with `unit`
/// * Exports performed by `unit`
/// * Dependencies from `unit` on other things
/// * References from `unit` to other units
/// Re-analysis of the `AnalUnit` will recreate references and dependencies as appropriate.
pub fn resetUnit(zcu: *Zcu, unit: AnalUnit) void {
const gpa = zcu.comp.gpa;
if (!dev.env.supports(.incremental)) {
// This is the first time `unit` is being analyzed, so there is no stale data to clear.
return;
}
// Compile errors
if (zcu.failed_analysis.fetchSwapRemove(unit)) |kv| {
kv.value.destroy(gpa);
} else if (zcu.dependency_loop_nodes.swapRemove(unit)) {
_ = zcu.dependency_loops.swapRemove(unit);
_ = zcu.transitive_failed_analysis.swapRemove(unit);
} else {
_ = zcu.transitive_failed_analysis.swapRemove(unit);
}
// Compile logs
if (zcu.compile_logs.fetchSwapRemove(unit)) |kv| {
var opt_line_idx = kv.value.first_line.toOptional();
while (opt_line_idx.unwrap()) |line_idx| {
zcu.free_compile_log_lines.append(gpa, line_idx) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
break;
};
opt_line_idx = line_idx.get(zcu).next;
}
}
// Exports
exports: {
const base: u32, const len: u32 = index: {
if (zcu.single_exports.fetchSwapRemove(unit)) |kv| {
break :index .{ @intFromEnum(kv.value), 1 };
}
if (zcu.multi_exports.fetchSwapRemove(unit)) |kv| {
break :index .{ kv.value.index, kv.value.len };
}
break :exports;
};
for (zcu.all_exports.items[base..][0..len], base..) |exp, exp_index_usize| {
const exp_index: Export.Index = @enumFromInt(exp_index_usize);
if (zcu.llvm_object) |llvm_object| {
_ = llvm_object; // TODO: delete exports from LLVM
} else if (zcu.comp.bin_file) |lf| {
lf.deleteExport(exp.exported, exp.opts.name);
}
if (zcu.failed_exports.fetchSwapRemove(exp_index)) |failed_kv| {
failed_kv.value.destroy(gpa);
}
}
zcu.free_exports.ensureUnusedCapacity(gpa, len) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
break :exports;
};
for (base..base + len) |exp_index| {
zcu.free_exports.appendAssumeCapacity(@enumFromInt(exp_index));
}
}
// Dependencies
zcu.intern_pool.removeDependenciesForDepender(gpa, unit);
// References
zcu.clearCachedResolvedReferences();
unit_refs: {
const kv = zcu.reference_table.fetchSwapRemove(unit) orelse break :unit_refs;
var idx = kv.value;
while (idx != std.math.maxInt(u32)) {
const ref = zcu.all_references.items[idx];
zcu.free_references.append(gpa, idx) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
break :unit_refs;
};
idx = ref.next;
var opt_inline_frame = ref.inline_frame;
while (opt_inline_frame.unwrap()) |inline_frame| {
// The same inline frame could be used multiple times by one unit. We need to
// detect this case to avoid adding it to `free_inline_reference_frames` more
// than once. We do that by setting `parent` to itself as a marker.
if (inline_frame.ptr(zcu).parent == inline_frame.toOptional()) break;
zcu.free_inline_reference_frames.append(gpa, inline_frame) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
break :unit_refs;
};
opt_inline_frame = inline_frame.ptr(zcu).parent;
inline_frame.ptr(zcu).parent = inline_frame.toOptional(); // signal to code above
}
}
}
type_refs: {
const kv = zcu.type_reference_table.fetchSwapRemove(unit) orelse break :type_refs;
var idx = kv.value;
while (idx != std.math.maxInt(u32)) {
zcu.free_type_references.append(gpa, idx) catch {
// This space will be reused eventually, so we need not propagate this error.
// Just leak it for now, and let GC reclaim it later on.
break :type_refs;
};
idx = zcu.all_type_references.items[idx].next;
}
}
}
pub fn addInlineReferenceFrame(zcu: *Zcu, frame: InlineReferenceFrame) Allocator.Error!Zcu.InlineReferenceFrame.Index {
const frame_idx: InlineReferenceFrame.Index = zcu.free_inline_reference_frames.pop() orelse idx: {
_ = try zcu.inline_reference_frames.addOne(zcu.gpa);
break :idx @enumFromInt(zcu.inline_reference_frames.items.len - 1);
};
frame_idx.ptr(zcu).* = frame;
return frame_idx;
}
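/// Records that `src_unit` references `referenced_unit`. The references made by a unit form an
/// intrusive singly-linked list threaded through `zcu.all_references` via the `next` field and
/// terminated by `maxInt(u32)`; `zcu.reference_table` maps each unit to the head of its list.
/// `addTypeReference` below uses the same scheme for type references.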
pub fn addUnitReference(
zcu: *Zcu,
src_unit: AnalUnit,
referenced_unit: AnalUnit,
ref_src: LazySrcLoc,
inline_frame: InlineReferenceFrame.Index.Optional,
) Allocator.Error!void {
const gpa = zcu.gpa;
zcu.clearCachedResolvedReferences();
try zcu.reference_table.ensureUnusedCapacity(gpa, 1);
const ref_idx = zcu.free_references.pop() orelse idx: {
_ = try zcu.all_references.addOne(gpa);
break :idx zcu.all_references.items.len - 1;
};
errdefer comptime unreachable;
const gop = zcu.reference_table.getOrPutAssumeCapacity(src_unit);
zcu.all_references.items[ref_idx] = .{
.referenced = referenced_unit,
.next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
.src = ref_src,
.inline_frame = inline_frame,
};
gop.value_ptr.* = @intCast(ref_idx);
}
pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void {
const gpa = zcu.gpa;
zcu.clearCachedResolvedReferences();
try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1);
const ref_idx = zcu.free_type_references.pop() orelse idx: {
_ = try zcu.all_type_references.addOne(gpa);
break :idx zcu.all_type_references.items.len - 1;
};
errdefer comptime unreachable;
const gop = zcu.type_reference_table.getOrPutAssumeCapacity(src_unit);
zcu.all_type_references.items[ref_idx] = .{
.referenced = referenced_type,
.next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
.src = ref_src,
};
gop.value_ptr.* = @intCast(ref_idx);
}
fn clearCachedResolvedReferences(zcu: *Zcu) void {
if (zcu.resolved_references) |*r| r.deinit(zcu.gpa);
zcu.resolved_references = null;
}
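/// Returns the number of bits required to represent the global error set, based on
/// `zcu.error_limit`. For example (illustrative), with `error_limit == 1000`, `log2_int` yields
/// 9, so the result is 10 bits; an `error_limit` of 0 requires 0 bits.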
pub fn errorSetBits(zcu: *const Zcu) u16 {
const target = zcu.getTarget();
if (zcu.error_limit == 0) return 0;
if (target.cpu.arch.isSpirV()) {
// As expected by https://github.com/Snektron/zig-spirv-test-executor
if (zcu.comp.config.is_test) return 32;
}
return @as(u16, std.math.log2_int(ErrorInt, zcu.error_limit)) + 1;
}
pub fn errNote(
zcu: *Zcu,
src_loc: LazySrcLoc,
parent: *ErrorMsg,
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!void {
const msg = try std.fmt.allocPrint(zcu.gpa, format, args);
errdefer zcu.gpa.free(msg);
parent.notes = try zcu.gpa.realloc(parent.notes, parent.notes.len + 1);
parent.notes[parent.notes.len - 1] = .{
.src_loc = src_loc,
.msg = msg,
};
}
/// Deprecated. There is no global target for a Zig Compilation Unit. Instead,
/// look up the target based on the Module that contains the source code being
/// analyzed.
pub fn getTarget(zcu: *const Zcu) *const Target {
return &zcu.root_mod.resolved_target.result;
}
/// Deprecated. There is no global optimization mode for a Zig Compilation
/// Unit. Instead, look up the optimization mode based on the Module that
/// contains the source code being analyzed.
pub fn optimizeMode(zcu: *const Zcu) std.builtin.OptimizeMode {
return zcu.root_mod.optimize_mode;
}
pub fn handleUpdateExports(
zcu: *Zcu,
export_indices: []const Export.Index,
result: link.File.UpdateExportsError!void,
) Allocator.Error!void {
const gpa = zcu.gpa;
result catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
const export_idx = export_indices[0];
const new_export = export_idx.ptr(zcu);
new_export.status = .failed_retryable;
try zcu.failed_exports.ensureUnusedCapacity(gpa, 1);
const msg = try ErrorMsg.create(gpa, new_export.src, "unable to export: {s}", .{@errorName(err)});
zcu.failed_exports.putAssumeCapacityNoClobber(export_idx, msg);
},
};
}
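/// Appends `source` to the global assembly associated with `unit`. Successive chunks from the
/// same unit are joined with a newline, so e.g. (hypothetical inputs) adding ".text" followed by
/// ".globl foo" yields ".text\n.globl foo".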
pub fn addGlobalAssembly(zcu: *Zcu, unit: AnalUnit, source: []const u8) !void {
const gpa = zcu.gpa;
const gop = try zcu.global_assembly.getOrPut(gpa, unit);
if (gop.found_existing) {
const new_value = try std.fmt.allocPrint(gpa, "{s}\n{s}", .{ gop.value_ptr.*, source });
gpa.free(gop.value_ptr.*);
gop.value_ptr.* = new_value;
} else {
gop.value_ptr.* = try gpa.dupe(u8, source);
}
}
pub const Feature = enum {
/// When this feature is enabled, Sema will emit calls to
/// `std.builtin.panic` functions for things like safety checks and
/// unreachables. Otherwise traps will be emitted.
panic_fn,
/// When this feature is enabled, Sema will insert tracer functions for gathering a stack
/// trace for error returns.
error_return_trace,
/// When this feature is enabled, Sema will emit the `is_named_enum_value` AIR instructions
/// and use it to check for corrupt switches. Backends currently need to implement their own
/// logic to determine whether an enum value is in the set of named values.
is_named_enum_value,
error_set_has_value,
field_reordering,
/// In theory, backends are supposed to work like this:
///
/// * The AIR emitted by `Sema` is converted into MIR by `codegen.generateFunction`. This pass
/// is "pure", in that it does not depend on or modify any external mutable state.
///
/// * That MIR is sent to the linker, which calls `codegen.emitFunction` to convert the MIR to
/// finalized machine code. This process is permitted to query and modify linker state.
///
/// * The linker stores the resulting machine code in the binary as needed.
///
/// The first stage described above can run in parallel to the rest of the compiler, and even to
/// other code generation work; we can run as many codegen threads as we want in parallel because
/// this pass is pure. Emit and link must be single-threaded, but are generally very fast, so
/// that isn't a problem.
///
/// Unfortunately, some code generation implementations currently query and/or mutate linker state
/// or even (in the case of the LLVM backend) semantic analysis state. Such backends cannot be run
/// in parallel with each other, with linking, or (potentially) with semantic analysis.
///
/// Additionally, some backends continue to need the AIR in the "emit" stage, despite this pass
/// operating on MIR. This complicates memory management under the threading model above.
///
/// These are both **bugs** in backend implementations, left over from legacy code. However, they
/// are difficult to fix. So, this `Feature` currently guards correct threading of code generation:
///
/// * With this feature enabled, the backend is threaded as described above. The "emit" stage does
/// not have access to AIR (it will be `undefined`; see `codegen.emitFunction`).
///
/// * With this feature disabled, semantic analysis, code generation, and linking all occur on the
/// same thread, and the "emit" stage has access to AIR.
separate_thread,
};
pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
return target_util.backendSupportsFeature(backend, feature);
}
pub const AtomicPtrAlignmentError = error{
FloatTooBig,
IntTooBig,
BadType,
OutOfMemory,
};
pub const AtomicPtrAlignmentDiagnostics = struct {
bits: u16 = undefined,
max_bits: u16 = undefined,
};
/// Returns the alignment required for the target to perform atomic operations on type `ty` (that
/// is, the required align attribute on the pointer). If the ABI alignment of `ty` is sufficient,
/// returns `.none`.
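/// For example, on x86_64 with the `cx16` feature, atomics up to 128 bits are supported, so a
/// `u256` returns `error.IntTooBig` with `diags` reporting `bits = 256, max_bits = 128`.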
// TODO this function does not take into account CPU features, which can affect
// this value. Audit this!
pub fn atomicPtrAlignment(
zcu: *Zcu,
ty: Type,
diags: *AtomicPtrAlignmentDiagnostics,
) AtomicPtrAlignmentError!Alignment {
const target = zcu.getTarget();
const max_atomic_bits: u16 = switch (target.cpu.arch) {
.ez80,
=> 8,
.aarch64,
.aarch64_be,
=> 128,
.mips64,
.mips64el,
=> 64, // N32 should be 64, not 32.
.x86_64 => if (target.cpu.has(.x86, .cx16)) 128 else 64, // x32 should be 64 or 128, not 32.
else => target.ptrBitWidth(),
};
if (ty.toIntern() == .bool_type) return .none;
if (ty.isRuntimeFloat()) {
const bit_count = ty.floatBits(target);
if (bit_count > max_atomic_bits) {
diags.* = .{
.bits = bit_count,
.max_bits = max_atomic_bits,
};
return error.FloatTooBig;
}
return .none;
}
if (switch (ty.zigTypeTag(zcu)) {
.int, .@"enum" => true,
.@"struct" => ty.containerLayout(zcu) == .@"packed",
else => false,
}) {
assert(ty.isAbiInt(zcu));
const bit_count = ty.intInfo(zcu).bits;
if (bit_count > max_atomic_bits) {
diags.* = .{
.bits = bit_count,
.max_bits = max_atomic_bits,
};
return error.IntTooBig;
}
return .none;
}
if (ty.isPtrAtRuntime(zcu)) return .none;
return error.BadType;
}
/// Returns null if `ty` is not a struct.
pub fn typeToStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
if (ty.ip_index == .none) return null;
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
.struct_type => ip.loadStructType(ty.ip_index),
else => null,
};
}
pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
const s = zcu.typeToStruct(ty) orelse return null;
if (s.layout != .@"packed") return null;
return s;
}
/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
/// in the packed struct InternPool data rather than computing them on the
/// fly; however, that was found to perform worse when measured on real-world
/// projects.
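///
/// For example (illustrative), in `packed struct { a: u3, b: u5, c: u8 }`, field index 2 (`c`)
/// has bit offset 3 + 5 = 8.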
pub fn structPackedFieldBitOffset(
zcu: *Zcu,
struct_type: InternPool.LoadedStructType,
field_index: u32,
) u16 {
const ip = &zcu.intern_pool;
assert(struct_type.layout == .@"packed");
var bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |i| {
if (i == field_index) {
return @intCast(bit_sum);
}
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
bit_sum += field_ty.bitSize(zcu);
}
unreachable; // index out of bounds
}
pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
if (ty.ip_index == .none) return null;
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
.union_type => ip.loadUnionType(ty.ip_index),
else => null,
};
}
pub fn typeToFunc(zcu: *const Zcu, ty: Type) ?InternPool.Key.FuncType {
if (ty.ip_index == .none) return null;
return zcu.intern_pool.indexToFuncType(ty.toIntern());
}
pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Index {
return zcu.intern_pool.iesFuncIndex(ies_index);
}
pub fn funcInfo(zcu: *const Zcu, func_index: InternPool.Index) InternPool.Key.Func {
return zcu.intern_pool.toFunc(func_index);
}
pub const UnionLayout = struct {
abi_size: u64,
abi_align: Alignment,
most_aligned_field: u32,
most_aligned_field_size: u64,
biggest_field: u32,
payload_size: u64,
payload_align: Alignment,
tag_align: Alignment,
tag_size: u64,
padding: u32,
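/// Byte offset of the tag. When the tag is less aligned than the payload, the payload is laid
/// out first: e.g. (illustrative) with `payload_size == 8` and `tag_align` less than
/// `payload_align`, the tag lives at offset 8; otherwise it lives at offset 0.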
pub fn tagOffset(layout: UnionLayout) u64 {
return if (layout.tag_align.compare(.lt, layout.payload_align)) layout.payload_size else 0;
}
pub fn payloadOffset(layout: UnionLayout) u64 {
return if (layout.tag_align.compare(.lt, layout.payload_align)) 0 else layout.tag_size;
}
};
/// Returns the index of the active field, given the current tag value
pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &zcu.intern_pool;
if (enum_tag.toIntern() == .none) return null;
const enum_tag_key = ip.indexToKey(enum_tag.toIntern()).enum_tag;
assert(enum_tag_key.ty == loaded_union.enum_tag_type);
const loaded_enum = ip.loadEnumType(loaded_union.enum_tag_type);
return loaded_enum.tagValueIndex(ip, enum_tag_key.int);
}
pub const ResolvedReference = struct {
referencer: AnalUnit,
/// If `inline_frame` is not `.none`, this is the *deepest* source location in the chain of
/// inline calls. For source locations further up the inline call stack, consult `inline_frame`.
src: LazySrcLoc,
inline_frame: InlineReferenceFrame.Index.Optional,
};
/// Returns a mapping from an `AnalUnit` to where it is referenced.
/// If the value is `null`, the `AnalUnit` is a root of analysis.
/// If an `AnalUnit` is not in the returned map, it is unreferenced.
/// The returned hashmap is owned by the `Zcu`, so should not be freed by the caller.
/// This hashmap is cached, so repeated calls to this function are cheap.
pub fn resolveReferences(zcu: *Zcu) Allocator.Error!*const std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
if (zcu.resolved_references == null) {
zcu.resolved_references = try zcu.resolveReferencesInner();
}
return &zcu.resolved_references.?;
}
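/// Walks the reference graph from the analysis roots using two worklists: `types`, container
/// types whose automatically-analyzed declarations must be queued, and `units`, analysis units
/// whose outgoing references must be followed. Iterates until both worklists are exhausted.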
fn resolveReferencesInner(zcu: *Zcu) Allocator.Error!std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
const gpa = zcu.gpa;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
var units: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty;
var types: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .empty;
defer {
units.deinit(gpa);
types.deinit(gpa);
}
// This is not a sufficient size, but an approximate lower bound.
try units.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));
try types.ensureTotalCapacity(gpa, zcu.analysis_roots_len);
for (zcu.analysisRoots()) |mod| {
const file = zcu.module_roots.get(mod).?.unwrap() orelse continue;
const root_ty = zcu.fileRootType(file);
if (root_ty == .none) continue;
types.putAssumeCapacityNoClobber(root_ty, null);
}
var unit_idx: usize = 0;
var type_idx: usize = 0;
while (true) {
if (type_idx < types.count()) {
const ty = types.keys()[type_idx];
const referencer = types.values()[type_idx];
type_idx += 1;
refs_log.debug("handle type '{f}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
// Queue any decls within this type which would be automatically analyzed.
// Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`.
const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?;
for (zcu.namespacePtr(ns).comptime_decls.items) |cu| {
// `comptime` decls are always analyzed.
const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
const gop = try units.getOrPut(gpa, unit);
if (!gop.found_existing) {
refs_log.debug("type '{f}': ref comptime %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(ip.getComptimeUnit(cu).zir_index.resolve(ip) orelse continue),
});
gop.value_ptr.* = referencer;
}
}
for (zcu.namespacePtr(ns).test_decls.items) |nav_id| {
const nav = ip.getNav(nav_id);
// `test` declarations are analyzed depending on the test filter.
const inst_info = nav.analysis.?.zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
const decl = file.zir.?.getDeclaration(inst_info.inst);
if (!comp.config.is_test or file.mod != zcu.main_mod) continue;
const want_analysis = switch (decl.kind) {
.@"const", .@"var" => unreachable,
.@"comptime" => unreachable,
.unnamed_test => true,
.@"test", .decltest => a: {
const fqn_slice = nav.fqn.toSlice(ip);
if (comp.test_filters.len > 0) {
for (comp.test_filters) |test_filter| {
if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break;
} else break :a false;
}
break :a true;
},
};
if (want_analysis) {
{
const gop = try units.getOrPut(gpa, .wrap(.{ .nav_val = nav_id }));
if (!gop.found_existing) {
refs_log.debug("type '{f}': ref test %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
gop.value_ptr.* = referencer;
}
}
// Non-fatal AstGen errors could mean this test decl failed analysis, so it might not have a resolved value.
if (nav.resolved != null and nav.resolved.?.value != .none) {
const gop = try units.getOrPut(gpa, .wrap(.{ .func = nav.resolved.?.value }));
if (!gop.found_existing) gop.value_ptr.* = referencer;
}
}
}
for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
const decl = file.zir.?.getDeclaration(inst_info.inst);
if (decl.linkage == .@"export") {
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
const gop = try units.getOrPut(gpa, unit);
if (!gop.found_existing) {
refs_log.debug("type '{f}': ref named %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
gop.value_ptr.* = referencer;
}
}
}
for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const inst_info = ip.getNav(nav).analysis.?.zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
const decl = file.zir.?.getDeclaration(inst_info.inst);
if (decl.linkage == .@"export") {
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
const gop = try units.getOrPut(gpa, unit);
if (!gop.found_existing) {
refs_log.debug("type '{f}': ref named %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
gop.value_ptr.* = referencer;
}
}
}
continue;
}
if (unit_idx < units.count()) {
const unit = units.keys()[unit_idx];
unit_idx += 1;
// `nav_val` and `nav_ty` reference each other *implicitly* to save memory.
// Likewise for `type_layout` and `struct_defaults` of a struct type.
queue_paired: {
const other: AnalUnit = .wrap(switch (unit.unwrap()) {
.nav_val => |n| .{ .nav_ty = n },
.nav_ty => |n| .{ .nav_val = n },
.struct_defaults => |ty| .{ .type_layout = ty },
.type_layout => |ty| switch (ip.indexToKey(ty)) {
.struct_type => .{ .struct_defaults = ty },
.union_type, .enum_type, .opaque_type => break :queue_paired,
else => unreachable,
},
.@"comptime", .func, .memoized_state => break :queue_paired,
});
const gop = try units.getOrPut(gpa, other);
if (gop.found_existing) break :queue_paired;
gop.value_ptr.* = units.values()[unit_idx - 1]; // same reference location
}
refs_log.debug("handle unit '{f}'", .{zcu.fmtAnalUnit(unit)});
if (zcu.reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_references.items[ref_idx];
const gop = try units.getOrPut(gpa, ref.referenced);
if (!gop.found_existing) {
refs_log.debug("unit '{f}': ref unit '{f}'", .{
zcu.fmtAnalUnit(unit),
zcu.fmtAnalUnit(ref.referenced),
});
gop.value_ptr.* = .{
.referencer = unit,
.src = ref.src,
.inline_frame = ref.inline_frame,
};
}
ref_idx = ref.next;
}
}
if (zcu.type_reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_type_references.items[ref_idx];
const gop = try types.getOrPut(gpa, ref.referenced);
if (!gop.found_existing) {
refs_log.debug("unit '{f}': ref type '{f}'", .{
zcu.fmtAnalUnit(unit),
Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
});
gop.value_ptr.* = .{
.referencer = unit,
.src = ref.src,
.inline_frame = .none,
};
}
ref_idx = ref.next;
}
}
continue;
}
break;
}
return units.move();
}
pub fn analysisRoots(zcu: *Zcu) []*Package.Module {
return zcu.analysis_roots_buffer[0..zcu.analysis_roots_len];
}
pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
return zcu.intern_pool.filePtr(file_index);
}
/// Returns the struct that represents this `File`.
/// If the struct has not been created, returns `.none`.
pub fn fileRootType(zcu: *const Zcu, file_index: File.Index) InternPool.Index {
const ip = &zcu.intern_pool;
const file_index_unwrapped = file_index.unwrap(ip);
const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
return files.view().items(.root_type)[file_index_unwrapped.index];
}
pub fn setFileRootType(zcu: *Zcu, file_index: File.Index, root_type: InternPool.Index) void {
const ip = &zcu.intern_pool;
const file_index_unwrapped = file_index.unwrap(ip);
const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire();
files.view().items(.root_type)[file_index_unwrapped.index] = root_type;
}
pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc {
const ip = &zcu.intern_pool;
return .{
.base_node_inst = ip.getNav(nav_index).srcInst(ip),
.offset = LazySrcLoc.Offset.nodeOffset(.zero),
};
}
pub fn typeSrcLoc(zcu: *const Zcu, ty_index: InternPool.Index) LazySrcLoc {
_ = zcu;
_ = ty_index;
@panic("TODO");
}
pub fn typeFileScope(zcu: *Zcu, ty_index: InternPool.Index) *File {
_ = zcu;
_ = ty_index;
@panic("TODO");
}
pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 {
const ip = &zcu.intern_pool;
const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?;
const zir = zcu.fileByIndex(inst_info.file).zir;
return zir.?.getDeclaration(inst_info.inst).src_line;
}
pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value {
return .fromInterned(zcu.intern_pool.getNav(nav_index).resolved.?.value);
}
pub fn navFileScopeIndex(zcu: *Zcu, nav: InternPool.Nav.Index) File.Index {
const ip = &zcu.intern_pool;
return ip.getNav(nav).srcInst(ip).resolveFile(ip);
}
pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
return zcu.fileByIndex(zcu.navFileScopeIndex(nav));
}
pub fn navAlignment(zcu: *Zcu, nav_id: InternPool.Nav.Index) InternPool.Alignment {
const resolved = zcu.intern_pool.getNav(nav_id).resolved.?;
return switch (resolved.@"align") {
else => |a| a,
.none => Type.fromInterned(resolved.type).abiAlignment(zcu),
};
}
pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Alt(FormatAnalUnit, formatAnalUnit) {
return .{ .data = .{ .unit = unit, .zcu = zcu } };
}
pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Alt(FormatDependee, formatDependee) {
return .{ .data = .{ .dependee = d, .zcu = zcu } };
}
const FormatAnalUnit = struct { unit: AnalUnit, zcu: *const Zcu };
fn formatAnalUnit(data: FormatAnalUnit, writer: *Io.Writer) Io.Writer.Error!void {
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.unit.unwrap()) {
.@"comptime" => |cu_id| {
const cu = ip.getComptimeUnit(cu_id);
if (cu.zir_index.resolveFull(ip)) |resolved| {
const file_path = zcu.fileByIndex(resolved.file).path;
return writer.print("comptime(inst=('{f}', %{}) [{}])", .{ file_path.fmt(zcu.comp), @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
} else {
return writer.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
}
},
.nav_val, .nav_ty => |nav, tag| return writer.print("{t}('{f}' [{}])", .{ tag, ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type_layout, .struct_defaults => |ty, tag| return writer.print("{t}('{f}' [{}])", .{ tag, Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.func => |func| {
const nav = zcu.funcInfo(func).owner_nav;
return writer.print("func('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
},
.memoized_state => return writer.writeAll("memoized_state"),
}
}
const FormatDependee = struct { dependee: InternPool.Dependee, zcu: *const Zcu };
fn formatDependee(data: FormatDependee, writer: *Io.Writer) Io.Writer.Error!void {
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.dependee) {
.src_hash => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("inst(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("inst('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.nav_val, .nav_ty => |nav, tag| {
const fqn = ip.getNav(nav).fqn;
return writer.print("{t}('{f}')", .{ tag, fqn.fmt(ip) });
},
.type_layout, .struct_defaults => |ip_index, tag| {
const name = Type.fromInterned(ip_index).containerTypeName(ip);
return writer.print("{t}('{f}')", .{ tag, name.fmt(ip) });
},
.func_ies => |ip_index| {
const fqn = ip.getNav(ip.indexToKey(ip_index).func.owner_nav).fqn;
return writer.print("func_ies('{f}')", .{fqn.fmt(ip)});
},
.source_file => |file| {
const file_path = zcu.fileByIndex(file).path;
return writer.print("source_file('{f}')", .{file_path.fmt(zcu.comp)});
},
.embed_file => |ef_idx| {
const ef = ef_idx.get(zcu);
return writer.print("embed_file('{f}')", .{ef.path.fmt(zcu.comp)});
},
.namespace => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("namespace(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("namespace('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.namespace_name => |k| {
const info = k.namespace.resolveFull(ip) orelse {
return writer.print("namespace(<lost>, '{f}')", .{k.name.fmt(ip)});
};
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("namespace('{f}', %{d}, '{f}')", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst), k.name.fmt(ip) });
},
.memoized_state => return writer.writeAll("memoized_state"),
}
}
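/// Reports whether calling convention `cc` is usable with the current target and backend:
/// `.ok` if supported, `.bad_arch` if `cc` is only valid on other architectures, or
/// `.bad_backend` if the selected backend does not implement it.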
pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enum) {
ok,
bad_arch: []const std.Target.Cpu.Arch, // value is allowed archs for cc
bad_backend: std.builtin.CompilerBackend, // value is current backend
} {
const target = zcu.getTarget();
const backend = target_util.zigBackend(target, zcu.comp.config.use_llvm);
switch (cc) {
.auto, .@"inline" => return .ok,
.async => return .{ .bad_backend = backend }, // nothing supports async currently
.naked => {}, // depends only on backend
else => for (cc.archs()) |allowed_arch| {
if (allowed_arch == target.cpu.arch) break;
} else return .{ .bad_arch = cc.archs() },
}
const backend_ok = switch (backend) {
.stage1 => unreachable,
.other => unreachable,
_ => unreachable,
.stage2_llvm => @import("codegen/llvm.zig").toLlvmCallConv(cc, target) != null,
.stage2_c => ok: {
if (target.cCallingConvention()) |default_c| {
if (cc.eql(default_c)) {
break :ok true;
}
}
break :ok switch (cc) {
.x86_16_cdecl,
.x86_16_stdcall,
.x86_16_regparmcall,
.x86_16_interrupt,
.x86_64_sysv,
.x86_64_win,
.x86_64_vectorcall,
.x86_64_regcall_v3_sysv,
.x86_64_regcall_v4_win,
.x86_64_interrupt,
.x86_fastcall,
.x86_thiscall,
.x86_vectorcall,
.x86_regcall_v3,
.x86_regcall_v4_win,
.x86_interrupt,
.aarch64_vfabi,
.aarch64_vfabi_sve,
.arm_aapcs,
.csky_interrupt,
.riscv64_lp64_v,
.riscv32_ilp32_v,
.m68k_rtd,
.m68k_interrupt,
.msp430_interrupt,
=> |opts| opts.incoming_stack_alignment == null,
.arm_aapcs_vfp,
=> |opts| opts.incoming_stack_alignment == null,
.arc_interrupt,
=> |opts| opts.incoming_stack_alignment == null,
.arm_interrupt,
=> |opts| opts.incoming_stack_alignment == null,
.microblaze_interrupt,
=> |opts| opts.incoming_stack_alignment == null,
.mips_interrupt,
.mips64_interrupt,
=> |opts| opts.incoming_stack_alignment == null,
.riscv32_interrupt,
.riscv64_interrupt,
=> |opts| opts.incoming_stack_alignment == null,
.sh_interrupt,
=> |opts| opts.incoming_stack_alignment == null,
.x86_sysv,
.x86_win,
.x86_stdcall,
=> |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,
.avr_interrupt,
.avr_signal,
=> true,
.ez80_tiflags => true,
.naked => true,
else => false,
};
},
.stage2_wasm => switch (cc) {
.wasm_mvp => |opts| opts.incoming_stack_alignment == null,
else => false,
},
.stage2_arm => switch (cc) {
.arm_aapcs => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_x86_64 => switch (cc) {
.x86_64_sysv, .x86_64_win, .naked => true, // incoming stack alignment supported
else => false,
},
.stage2_aarch64 => switch (cc) {
.aarch64_aapcs, .aarch64_aapcs_darwin, .naked => true,
else => false,
},
.stage2_x86 => switch (cc) {
.x86_sysv,
.x86_win,
=> |opts| opts.incoming_stack_alignment == null and opts.register_params == 0,
.naked => true,
else => false,
},
.stage2_powerpc => switch (target.cpu.arch) {
.powerpc, .powerpcle => switch (cc) {
.powerpc_sysv,
.powerpc_sysv_altivec,
.powerpc_aix,
.powerpc_aix_altivec,
.naked,
=> true,
else => false,
},
.powerpc64, .powerpc64le => switch (cc) {
.powerpc64_elf,
.powerpc64_elf_altivec,
.powerpc64_elf_v2,
.naked,
=> true,
else => false,
},
else => unreachable,
},
.stage2_riscv64 => switch (cc) {
.riscv64_lp64 => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_sparc64 => switch (cc) {
.sparc64_sysv => |opts| opts.incoming_stack_alignment == null,
.naked => true,
else => false,
},
.stage2_spirv => switch (cc) {
.spirv_device, .spirv_kernel => true,
.spirv_fragment, .spirv_vertex => target.os.tag == .vulkan or target.os.tag == .opengl,
else => false,
},
};
if (!backend_ok) return .{ .bad_backend = backend };
return .ok;
}
pub const CodegenFailError = error{
/// Indicates the error message has already been stored in `Zcu.failed_codegen`.
CodegenFail,
OutOfMemory,
};
pub fn codegenFail(
zcu: *Zcu,
nav_index: InternPool.Nav.Index,
comptime format: []const u8,
args: anytype,
) CodegenFailError {
const msg = try Zcu.ErrorMsg.create(zcu.gpa, zcu.navSrcLoc(nav_index), format, args);
return zcu.codegenFailMsg(nav_index, msg);
}
/// Takes ownership of `msg`, even on OOM.
pub fn codegenFailMsg(zcu: *Zcu, nav_index: InternPool.Nav.Index, msg: *ErrorMsg) CodegenFailError {
const comp = zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
{
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
errdefer msg.deinit(gpa);
try zcu.failed_codegen.putNoClobber(gpa, nav_index, msg);
}
return error.CodegenFail;
}
/// Asserts that `zcu.failed_codegen` contains the key `nav`, with the necessary lock held.
pub fn assertCodegenFailed(zcu: *Zcu, nav: InternPool.Nav.Index) void {
const comp = zcu.comp;
const io = comp.io;
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
assert(zcu.failed_codegen.contains(nav));
}
pub fn codegenFailType(
zcu: *Zcu,
ty_index: InternPool.Index,
comptime format: []const u8,
args: anytype,
) CodegenFailError {
const gpa = zcu.gpa;
try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
const msg = try Zcu.ErrorMsg.create(gpa, zcu.typeSrcLoc(ty_index), format, args);
zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
return error.CodegenFail;
}
pub fn codegenFailTypeMsg(zcu: *Zcu, ty_index: InternPool.Index, msg: *ErrorMsg) CodegenFailError {
const gpa = zcu.gpa;
{
errdefer msg.deinit(gpa);
try zcu.failed_types.ensureUnusedCapacity(gpa, 1);
}
zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
return error.CodegenFail;
}
/// Asserts that `zcu.multi_module_err != null`.
pub fn addFileInMultipleModulesError(
zcu: *Zcu,
eb: *std.zig.ErrorBundle.Wip,
) Allocator.Error!void {
const gpa = zcu.gpa;
const info = zcu.multi_module_err.?;
const file = info.file;
// error: file exists in modules 'root.foo' and 'root.bar'
// note: files must belong to only one module
// note: file is imported here
// note: which is imported here
// note: which is the root of module 'root.foo' imported here
// note: file is the root of module 'root.bar' imported here
const file_src = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb);
const root_msg = try eb.printString("file exists in modules '{s}' and '{s}'", .{
info.modules[0].fully_qualified_name,
info.modules[1].fully_qualified_name,
});
var notes: std.ArrayList(std.zig.ErrorBundle.MessageIndex) = .empty;
defer notes.deinit(gpa);
try notes.append(gpa, try eb.addErrorMessage(.{
.msg = try eb.addString("files must belong to only one module"),
.src_loc = file_src,
}));
try zcu.explainWhyFileIsInModule(eb, &notes, file, info.modules[0], info.refs[0]);
try zcu.explainWhyFileIsInModule(eb, &notes, file, info.modules[1], info.refs[1]);
try eb.addRootErrorMessage(.{
.msg = root_msg,
.src_loc = file_src,
.notes_len = @intCast(notes.items.len),
});
const notes_start = try eb.reserveNotes(@intCast(notes.items.len));
const notes_slice: []std.zig.ErrorBundle.MessageIndex = @ptrCast(eb.extra.items[notes_start..]);
@memcpy(notes_slice, notes.items);
}
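/// Appends notes to `notes_out` explaining the chain of imports by which `file` ended up in
/// `in_module`, starting from `ref` and walking importers via `zcu.alive_files`.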
fn explainWhyFileIsInModule(
zcu: *Zcu,
eb: *std.zig.ErrorBundle.Wip,
notes_out: *std.ArrayList(std.zig.ErrorBundle.MessageIndex),
file: File.Index,
in_module: *Package.Module,
ref: File.Reference,
) Allocator.Error!void {
const gpa = zcu.gpa;
// error: file is the root of module 'foo'
//
// error: file is imported here by the root of module 'foo'
//
// error: file is imported here
// note: which is imported here
// note: which is imported here by the root of module 'foo'
var import = switch (ref) {
.analysis_root => |mod| {
assert(mod == in_module);
try notes_out.append(gpa, try eb.addErrorMessage(.{
.msg = try eb.printString("file is the root of module '{s}'", .{mod.fully_qualified_name}),
.src_loc = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb),
}));
return;
},
.import => |import| if (import.module) |mod| {
assert(mod == in_module);
try notes_out.append(gpa, try eb.addErrorMessage(.{
.msg = try eb.printString("file is the root of module '{s}'", .{mod.fully_qualified_name}),
.src_loc = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb),
}));
return;
} else import,
};
var is_first = true;
while (true) {
const thing: []const u8 = if (is_first) "file" else "which";
is_first = false;
const importer_file = zcu.fileByIndex(import.importer);
// `errorBundleTokenSrc` expects the tree to be loaded
_ = importer_file.getTree(zcu) catch |err| {
try Compilation.unableToLoadZcuFile(zcu, eb, importer_file, err);
return; // stop the explanation early
};
const import_src = try importer_file.errorBundleTokenSrc(import.tok, zcu, eb);
const importer_ref = zcu.alive_files.get(import.importer).?;
const importer_root: ?*Package.Module = switch (importer_ref) {
.analysis_root => |mod| mod,
.import => |i| i.module,
};
if (importer_root) |m| {
try notes_out.append(gpa, try eb.addErrorMessage(.{
.msg = try eb.printString("{s} is imported here by the root of module '{s}'", .{ thing, m.fully_qualified_name }),
.src_loc = import_src,
}));
return;
}
try notes_out.append(gpa, try eb.addErrorMessage(.{
.msg = try eb.printString("{s} is imported here", .{thing}),
.src_loc = import_src,
}));
import = importer_ref.import;
}
}
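/// Adds one error to `eb` for each referenced dependency loop in `zcu.dependency_loops`.
/// Loops which turn out to be unreferenced are skipped, since they need no compile error.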
pub fn addDependencyLoopErrors(zcu: *Zcu, eb: *std.zig.ErrorBundle.Wip) Allocator.Error!void {
const gpa = zcu.comp.gpa;
const all_references = try zcu.resolveReferences();
var units: std.ArrayList(AnalUnit) = .empty;
defer units.deinit(gpa);
// TODO: sort the dependency loops somehow to make the error bundle reproducible
for (zcu.dependency_loops.keys()) |arbitrary_unit| {
units.clearRetainingCapacity();
var cur = arbitrary_unit;
while (true) {
try units.append(gpa, cur);
cur = zcu.dependency_loop_nodes.get(cur).?.unit;
if (cur == arbitrary_unit) break;
}
// `units` now contains all units in the loop. We need to pick a starting point somewhere
// along that loop. We will pick whichever unit has the shortest reference trace, because
// the other units may well just be referenced *by* that one! This is also likely to match
// the user's intuition for where the loop "starts".
var start_index: usize = 0;
var start_depth: u32 = depth: {
var depth: u32 = 0;
var opt_ref = all_references.get(units.items[0]) orelse {
// This dependency loop is actually unreferenced, so we don't need to emit a compile
// error at all! Move onto the next dependency loop.
continue;
};
while (opt_ref) |ref| : (opt_ref = all_references.get(ref.referencer).?) depth += 1;
break :depth depth;
};
for (units.items[1..], 1..) |unit, index| {
var depth: u32 = 0;
var opt_ref = all_references.get(unit).?;
while (opt_ref) |ref| : (opt_ref = all_references.get(ref.referencer).?) depth += 1;
if (depth < start_depth) {
start_index = index;
start_depth = depth;
}
}
// Collect a reference trace for the start of the loop.
var ref_trace: std.ArrayList(std.zig.ErrorBundle.ReferenceTrace) = .empty;
defer ref_trace.deinit(gpa);
const frame_limit = zcu.comp.reference_trace orelse 0;
try zcu.populateReferenceTrace(units.items[start_index], frame_limit, eb, &ref_trace);
if (units.items.len == 1) {
// Don't do a complicated message with multiple notes, just do a single error message.
assert(start_index == 0);
const root_msg = addDependencyLoopErrorLine(zcu, eb, units.items[start_index], ref_trace.items) catch |err| switch (err) {
error.AlreadyReported => return, // give up on the dep loop error
error.OutOfMemory => |e| return e,
};
try eb.root_list.append(eb.gpa, root_msg);
continue;
}
// Collect all notes first so we don't leave an incomplete root error message on `error.AlreadyReported`.
const note_buf = try gpa.alloc(std.zig.ErrorBundle.MessageIndex, units.items.len + 1);
defer gpa.free(note_buf);
note_buf[0] = addDependencyLoopErrorLine(zcu, eb, units.items[start_index], ref_trace.items) catch |err| switch (err) {
error.AlreadyReported => return, // give up on the dep loop error
error.OutOfMemory => |e| return e,
};
for (units.items[start_index + 1 ..], note_buf[1 .. units.items.len - start_index]) |unit, *note| {
note.* = addDependencyLoopErrorLine(zcu, eb, unit, &.{}) catch |err| switch (err) {
error.AlreadyReported => return, // give up on the dep loop error
error.OutOfMemory => |e| return e,
};
}
for (units.items[0..start_index], note_buf[units.items.len - start_index .. units.items.len]) |unit, *note| {
note.* = addDependencyLoopErrorLine(zcu, eb, unit, &.{}) catch |err| switch (err) {
error.AlreadyReported => return, // give up on the dep loop error
error.OutOfMemory => |e| return e,
};
}
note_buf[units.items.len] = try eb.addErrorMessage(.{
.msg = try eb.addString("eliminate any one of these dependencies to break the loop"),
.src_loc = .none,
});
try eb.addRootErrorMessage(.{
.msg = try eb.printString("dependency loop with length {d}", .{units.items.len}),
.src_loc = .none,
.notes_len = @intCast(units.items.len + 1),
});
const notes_start = try eb.reserveNotes(@intCast(units.items.len + 1));
const notes: []std.zig.ErrorBundle.MessageIndex = @ptrCast(eb.extra.items[notes_start..]);
@memcpy(notes, note_buf);
}
}
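/// Adds to `eb` one message describing how `source_unit` depends on the next unit in its loop
/// (per `zcu.dependency_loop_nodes`), attaching `ref_trace` to the message's source location.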
fn addDependencyLoopErrorLine(
zcu: *Zcu,
eb: *std.zig.ErrorBundle.Wip,
source_unit: AnalUnit,
ref_trace: []const std.zig.ErrorBundle.ReferenceTrace,
) (Allocator.Error || error{AlreadyReported})!std.zig.ErrorBundle.MessageIndex {
const ip = &zcu.intern_pool;
const comp = zcu.comp;
const fmt_source: std.fmt.Alt(FormatAnalUnit, formatDependencyLoopSourceUnit) = .{ .data = .{
.unit = source_unit,
.zcu = zcu,
} };
const dep_node = zcu.dependency_loop_nodes.get(source_unit).?;
const msg: std.zig.ErrorBundle.String = if (dep_node.unit == source_unit) switch (source_unit.unwrap()) {
.@"comptime" => unreachable, // cannot be involved in a dependency loop
.nav_ty, .nav_val => try eb.printString("{f} depends on itself here", .{fmt_source}),
.memoized_state => unreachable, // memoized_state definitely does not *directly* depend on itself
.func => try eb.printString("{f} uses its own inferred error set here", .{fmt_source}),
.type_layout => try eb.printString("{f} depends on itself {s}", .{
fmt_source,
dep_node.reason.type_layout_reason.msg(),
}),
.struct_defaults => |ty| try eb.printString(
"default field values of '{f}' depend on themselves for initialization here",
.{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)},
),
} else switch (dep_node.unit.unwrap()) {
.@"comptime" => unreachable, // cannot be involved in a dependency loop
.nav_val => |nav| try eb.printString("{f} uses value of declaration '{f}' here", .{
fmt_source, ip.getNav(nav).fqn.fmt(ip),
}),
.nav_ty => |nav| try eb.printString("{f} uses type of declaration '{f}' here", .{
fmt_source, ip.getNav(nav).fqn.fmt(ip),
}),
.memoized_state => |stage| switch (stage) {
.panic => try eb.printString("{f} requires panic handler for call here", .{fmt_source}),
else => try eb.printString("{f} requires 'std.builtin' declarations here", .{fmt_source}),
},
.func => |func| try eb.printString("{f} uses inferred error set of function '{f}' here", .{
fmt_source, ip.getNav(zcu.funcInfo(func).owner_nav).fqn.fmt(ip),
}),
.type_layout => |ty| try eb.printString("{f} depends on type '{f}' {s}", .{
fmt_source,
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
dep_node.reason.type_layout_reason.msg(),
}),
.struct_defaults => |ty| try eb.printString(
"{f} uses default field values of '{f}' here",
.{ fmt_source, Type.fromInterned(ty).containerTypeName(ip).fmt(ip) },
),
};
const src_loc = dep_node.reason.src.upgrade(zcu);
const source = src_loc.file_scope.getSource(zcu) catch |err| {
try Compilation.unableToLoadZcuFile(zcu, eb, src_loc.file_scope, err);
return error.AlreadyReported;
};
const span = src_loc.span(zcu) catch |err| {
try Compilation.unableToLoadZcuFile(zcu, eb, src_loc.file_scope, err);
return error.AlreadyReported;
};
const loc = std.zig.findLineColumn(source, span.main);
const eb_src = try eb.addSourceLocation(.{
.src_path = try eb.printString("{f}", .{src_loc.file_scope.path.fmt(comp)}),
.span_start = span.start,
.span_main = span.main,
.span_end = span.end,
.line = @intCast(loc.line),
.column = @intCast(loc.column),
.source_line = try eb.addString(loc.source_line),
.reference_trace_len = @intCast(ref_trace.len),
});
for (ref_trace) |rt| try eb.addReferenceTrace(rt);
return eb.addErrorMessage(.{
.msg = msg,
.src_loc = eb_src,
});
}
fn formatDependencyLoopSourceUnit(data: FormatAnalUnit, w: *Io.Writer) Io.Writer.Error!void {
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.unit.unwrap()) {
.@"comptime" => unreachable, // cannot be involved in a dependency loop
.nav_val => |nav| try w.print("value of declaration '{f}'", .{ip.getNav(nav).fqn.fmt(ip)}),
.nav_ty => |nav| try w.print("type of declaration '{f}'", .{ip.getNav(nav).fqn.fmt(ip)}),
.memoized_state => |stage| switch (stage) {
.panic => try w.writeAll("panic handler"),
else => try w.writeAll("'std.builtin' declarations"),
},
.type_layout => |ty| try w.print("type '{f}'", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
}),
.struct_defaults => |ty| try w.print("default field value of '{f}'", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
}),
.func => |func| try w.print("function '{f}'", .{
ip.getNav(zcu.funcInfo(func).owner_nav).fqn.fmt(ip),
}),
}
}
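/// Populates `ref_trace` with up to `frame_limit` frames explaining how `root` came to be
/// referenced, expanding inline call frames along the way. If frames are omitted, a final entry
/// is appended whose `decl_name` encodes the number of hidden frames and whose `src_loc` is
/// `.none`.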
pub fn populateReferenceTrace(
zcu: *Zcu,
root: AnalUnit,
frame_limit: u32,
eb: *std.zig.ErrorBundle.Wip,
ref_trace: *std.ArrayList(std.zig.ErrorBundle.ReferenceTrace),
) Allocator.Error!void {
const ip = &zcu.intern_pool;
const gpa = zcu.comp.gpa;
if (frame_limit == 0) return;
const all_references = try zcu.resolveReferences();
var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .empty;
defer seen.deinit(gpa);
var referenced_by = root;
while (all_references.get(referenced_by)) |maybe_ref| {
const ref = maybe_ref orelse break;
const gop = try seen.getOrPut(gpa, ref.referencer);
if (gop.found_existing) break;
if (ref_trace.items.len < frame_limit) {
var last_call_src = ref.src;
var opt_inline_frame = ref.inline_frame;
while (opt_inline_frame.unwrap()) |inline_frame| {
const f = inline_frame.ptr(zcu).*;
const func_nav = ip.indexToKey(f.callee).func.owner_nav;
const func_name = ip.getNav(func_nav).name.toSlice(ip);
addReferenceTraceFrame(zcu, eb, ref_trace, func_name, last_call_src, true) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.AlreadyReported => {
// An incomplete reference trace isn't the end of the world; just cut it off.
return;
},
};
last_call_src = f.call_src;
opt_inline_frame = f.parent;
}
const root_name: ?[]const u8 = switch (ref.referencer.unwrap()) {
.@"comptime" => "comptime",
.nav_val, .nav_ty => |nav| ip.getNav(nav).name.toSlice(ip),
.type_layout, .struct_defaults => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.func => |f| ip.getNav(zcu.funcInfo(f).owner_nav).name.toSlice(ip),
.memoized_state => null,
};
if (root_name) |n| {
addReferenceTraceFrame(zcu, eb, ref_trace, n, last_call_src, false) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.AlreadyReported => {
// An incomplete reference trace isn't the end of the world; just cut it off.
return;
},
};
}
}
referenced_by = ref.referencer;
}
if (seen.count() > ref_trace.items.len) {
try ref_trace.append(gpa, .{
.decl_name = @intCast(seen.count() - ref_trace.items.len),
.src_loc = .none,
});
}
}
fn addReferenceTraceFrame(
zcu: *Zcu,
eb: *std.zig.ErrorBundle.Wip,
ref_trace: *std.ArrayList(std.zig.ErrorBundle.ReferenceTrace),
name: []const u8,
lazy_src: Zcu.LazySrcLoc,
inlined: bool,
) error{ OutOfMemory, AlreadyReported }!void {
const gpa = zcu.gpa;
const src = lazy_src.upgrade(zcu);
const source = src.file_scope.getSource(zcu) catch |err| {
try Compilation.unableToLoadZcuFile(zcu, eb, src.file_scope, err);
return error.AlreadyReported;
};
const span = src.span(zcu) catch |err| {
try Compilation.unableToLoadZcuFile(zcu, eb, src.file_scope, err);
return error.AlreadyReported;
};
const loc = std.zig.findLineColumn(source, span.main);
try ref_trace.append(gpa, .{
.decl_name = try eb.printString("{s}{s}", .{ name, if (inlined) " [inlined]" else "" }),
.src_loc = try eb.addSourceLocation(.{
.src_path = try eb.printString("{f}", .{src.file_scope.path.fmt(zcu.comp)}),
.span_start = span.start,
.span_main = span.main,
.span_end = span.end,
.line = @intCast(loc.line),
.column = @intCast(loc.column),
.source_line = 0,
}),
});
}
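/// State captured by `trackUnitSema`. `end` restores the previous progress node name and
/// analysis timer, and records time-report statistics for the analyzed declaration.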
const TrackedUnitSema = struct {
/// `null` means we created the node, so we should end it.
old_name: ?[std.Progress.Node.max_name_len]u8,
old_analysis_timer: ?Compilation.Timer,
analysis_timer_decl: ?InternPool.TrackedInst.Index,
pub fn end(tus: TrackedUnitSema, zcu: *Zcu) void {
const comp = zcu.comp;
const io = comp.io;
if (tus.old_name) |old_name| {
zcu.sema_prog_node.completeOne(); // we're just renaming, but it's effectively completion
zcu.cur_sema_prog_node.setName(&old_name);
} else {
zcu.cur_sema_prog_node.end();
zcu.cur_sema_prog_node = .none;
}
report_time: {
const sema_ns = zcu.cur_analysis_timer.?.finish(io) orelse break :report_time;
const zir_decl = tus.analysis_timer_decl orelse break :report_time;
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_sema += sema_ns;
const gop = comp.time_report.?.decl_sema_info.getOrPut(comp.gpa, zir_decl) catch |err| switch (err) {
error.OutOfMemory => {
comp.setAllocFailure();
break :report_time;
},
};
if (!gop.found_existing) gop.value_ptr.* = .{ .ns = 0, .count = 0 };
gop.value_ptr.ns += sema_ns;
gop.value_ptr.count += 1;
}
zcu.cur_analysis_timer = tus.old_analysis_timer;
if (zcu.cur_analysis_timer) |*t| t.@"resume"(io);
}
};
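/// Begins progress and timing bookkeeping for semantic analysis of the unit named `name`,
/// pausing any analysis timer already running. The caller must balance this call with
/// `TrackedUnitSema.end`; a typical caller-side sketch:
///     const tracked = zcu.trackUnitSema(name, zir_inst);
///     defer tracked.end(zcu);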
pub fn trackUnitSema(zcu: *Zcu, name: []const u8, zir_inst: ?InternPool.TrackedInst.Index) TrackedUnitSema {
const comp = zcu.comp;
const io = comp.io;
if (zcu.cur_analysis_timer) |*t| t.pause(io);
const old_analysis_timer = zcu.cur_analysis_timer;
zcu.cur_analysis_timer = zcu.comp.startTimer();
const old_name: ?[std.Progress.Node.max_name_len]u8 = old_name: {
if (zcu.cur_sema_prog_node.index == .none) {
zcu.cur_sema_prog_node = zcu.sema_prog_node.start(name, 0);
break :old_name null;
}
const old_name = zcu.cur_sema_prog_node.getName();
zcu.cur_sema_prog_node.setName(name);
break :old_name old_name;
};
return .{
.old_name = old_name,
.old_analysis_timer = old_analysis_timer,
.analysis_timer_decl = zir_inst,
};
}
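/// A fixed-size pool of in-flight codegen tasks. Backpressure is applied on two axes: the number
/// of functions in flight (`max_funcs_in_flight`) and the total AIR bytes in flight
/// (`max_air_bytes_in_flight`), so semantic analysis cannot race arbitrarily far ahead of
/// codegen and linking. Intended usage (a sketch; caller-side names are illustrative):
///     const task = try pool.start(zcu, func_index, &air, true);
///     // ... later, typically once the linker is ready for the result ...
///     const func, var mir = try task.wait(&pool, io);
///     defer mir.deinit(zcu);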
pub const CodegenTaskPool = struct {
const CodegenResult = PerThread.RunCodegenError!codegen.AnyMir;
/// In the worst observed case, MIR is around 50 times as large as AIR; more typically, the
/// ratio is around 20. Going by that worst-case 50x multiplier, and assuming we want to consume
/// no more than 500 MiB of memory on AIR/MIR, we get a limit of 500 MiB / 50 = 10 MiB of AIR
/// in flight.
const max_air_bytes_in_flight = 10 * 1024 * 1024;
const max_funcs_in_flight = @import("link.zig").Queue.buffer_size;
available_air_bytes: u32,
/// Locks the freelist and `available_air_bytes`.
mutex: Io.Mutex,
/// Signaled when an item is added to the freelist.
free_cond: Io.Condition,
/// Pre-allocated with enough capacity for all indices.
free: std.ArrayList(Index),
/// `.none` means this slot is in the freelist, in which case the corresponding
/// `task_air_bytes` and `task_futures` entries are `undefined`.
task_funcs: []InternPool.Index,
task_air_bytes: []u32,
task_futures: []Io.Future(CodegenResult),
pub fn init(arena: Allocator) Allocator.Error!CodegenTaskPool {
const task_funcs = try arena.alloc(InternPool.Index, max_funcs_in_flight);
const task_air_bytes = try arena.alloc(u32, max_funcs_in_flight);
const task_futures = try arena.alloc(Io.Future(CodegenResult), max_funcs_in_flight);
@memset(task_funcs, .none);
var free: std.ArrayList(Index) = try .initCapacity(arena, max_funcs_in_flight);
for (0..max_funcs_in_flight) |index| free.appendAssumeCapacity(@enumFromInt(index));
return .{
.available_air_bytes = max_air_bytes_in_flight,
.mutex = .init,
.free_cond = .init,
.free = free,
.task_funcs = task_funcs,
.task_air_bytes = task_air_bytes,
.task_futures = task_futures,
};
}
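/// Cancels every in-flight codegen task, freeing any MIR that was already produced. Asserts
/// that afterwards the whole AIR-byte budget has been returned to the pool.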
pub fn cancel(pool: *CodegenTaskPool, zcu: *const Zcu) void {
const io = zcu.comp.io;
for (
pool.task_funcs,
pool.task_air_bytes,
pool.task_futures,
) |func, effective_air_bytes, *future| {
if (func == .none) continue;
pool.available_air_bytes += effective_air_bytes;
var mir = future.cancel(io) catch continue;
mir.deinit(zcu);
}
assert(pool.available_air_bytes == max_air_bytes_in_flight);
}
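/// Schedules codegen of `func_index` on a worker task, blocking until the pool has both a free
/// slot and enough AIR-byte budget. The returned `Index` must eventually be passed to
/// `Index.wait` to reclaim the slot.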
pub fn start(
pool: *CodegenTaskPool,
zcu: *Zcu,
func_index: InternPool.Index,
air: *Air,
/// If `true`, this function will take ownership of `air`, freeing it after codegen
/// completes; it is not assumed that `air` will outlive this function. If `false`,
/// codegen will operate on `air` via the given pointer, which it is assumed will
/// outlive the codegen task.
move_air: bool,
) Io.Cancelable!Index {
const io = zcu.comp.io;
// To avoid consuming an excessive amount of memory, there is a limit on the total number of AIR
// bytes which can be in the codegen/link pipeline at one time. If we exceed this limit, we must
// wait for codegen/link to finish some WIP functions so they catch up with us.
const actual_air_bytes: u32 = @intCast(air.instructions.len * 5 + air.extra.items.len * 4);
// We need to let all AIR through eventually, even if one function exceeds `max_air_bytes_in_flight`.
const effective_air_bytes: u32 = @min(actual_air_bytes, max_air_bytes_in_flight);
assert(effective_air_bytes > 0);
const index: Index = index: {
try pool.mutex.lock(io);
defer pool.mutex.unlock(io);
while (pool.free.items.len == 0 or pool.available_air_bytes < effective_air_bytes) {
// The linker thread needs to catch up!
try pool.free_cond.wait(io, &pool.mutex);
}
pool.available_air_bytes -= effective_air_bytes;
break :index pool.free.pop().?;
};
// No turning back now: we're incrementing `pending_codegen_jobs` and starting the worker.
errdefer comptime unreachable;
assert(zcu.pending_codegen_jobs.fetchAdd(1, .monotonic) > 0); // the "Code Generation" node is still active
assert(pool.task_funcs[@intFromEnum(index)] == .none);
pool.task_funcs[@intFromEnum(index)] = func_index;
// Store the clamped value so that the budget balances when `wait` or `cancel` returns it.
pool.task_air_bytes[@intFromEnum(index)] = effective_air_bytes;
pool.task_futures[@intFromEnum(index)] = if (move_air) io.async(
workerCodegenOwnedAir,
.{ zcu, func_index, air.* },
) else io.async(
workerCodegenExternalAir,
.{ zcu, func_index, air },
);
return index;
}
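/// Identifies a slot in the pool, as returned by `start`.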
pub const Index = enum(u32) {
_,
/// Blocks until codegen has completed, successfully or otherwise.
/// The returned MIR is owned by the caller.
pub fn wait(
index: Index,
pool: *CodegenTaskPool,
io: Io,
) PerThread.RunCodegenError!struct { InternPool.Index, codegen.AnyMir } {
const func = pool.task_funcs[@intFromEnum(index)];
assert(func != .none);
const effective_air_bytes = pool.task_air_bytes[@intFromEnum(index)];
const result = pool.task_futures[@intFromEnum(index)].await(io);
pool.task_funcs[@intFromEnum(index)] = .none;
pool.task_air_bytes[@intFromEnum(index)] = undefined;
pool.task_futures[@intFromEnum(index)] = undefined;
{
pool.mutex.lockUncancelable(io);
defer pool.mutex.unlock(io);
pool.available_air_bytes += effective_air_bytes;
pool.free.appendAssumeCapacity(index);
pool.free_cond.signal(io);
}
return .{ func, try result };
}
};
fn workerCodegenOwnedAir(
zcu: *Zcu,
func_index: InternPool.Index,
orig_air: Air,
) CodegenResult {
// We own `air` now, so we are responsible for freeing it.
var air = orig_air;
defer air.deinit(zcu.comp.gpa);
const io = zcu.comp.io;
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
const pt: Zcu.PerThread = .activate(zcu, tid);
defer pt.deactivate();
return pt.runCodegen(func_index, &air);
}
fn workerCodegenExternalAir(
zcu: *Zcu,
func_index: InternPool.Index,
air: *Air,
) CodegenResult {
const io = zcu.comp.io;
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
const pt: Zcu.PerThread = .activate(zcu, tid);
defer pt.deactivate();
return pt.runCodegen(func_index, air);
}
};