diff --git a/lib/compiler/aro/aro/CodeGen.zig b/lib/compiler/aro/aro/CodeGen.zig index a746e6357b..5a65b8b6c4 100644 --- a/lib/compiler/aro/aro/CodeGen.zig +++ b/lib/compiler/aro/aro/CodeGen.zig @@ -54,8 +54,9 @@ return_label: Ir.Ref = undefined, compound_assign_dummy: ?Ir.Ref = null, fn fail(c: *CodeGen, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } { - var sf = std.heap.stackFallback(1024, c.comp.gpa); - const allocator = sf.get(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, c.comp.gpa); + const allocator = bfa.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index 94fc6ead72..d6ed6c00df 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -1761,8 +1761,9 @@ fn addToSearchPath(comp: *Compilation, include: Include, verbose: bool) !void { try comp.search_path.append(comp.gpa, include); } fn removeDuplicateSearchPaths(comp: *Compilation, start: usize, verbose: bool) !void { - var sf = std.heap.stackFallback(1024, comp.gpa); - const allocator = sf.get(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, comp.gpa); + const allocator = bfa.allocator(); var seen_includes: std.StringHashMapUnmanaged(void) = .empty; defer seen_includes.deinit(allocator); var seen_frameworks: std.StringHashMapUnmanaged(void) = .empty; @@ -1976,10 +1977,11 @@ const FindInclude = struct { ) Allocator.Error!?Result { const comp = find.comp; - var stack_fallback = std.heap.stackFallback(path_buf_stack_limit, comp.gpa); - const sfa = stack_fallback.get(); - const header_path = try std.fmt.allocPrint(sfa, format, args); - defer sfa.free(header_path); + var bfa_buf: [path_buf_stack_limit]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, comp.gpa); + const bfa = bfa_state.allocator(); 
+ const header_path = try std.fmt.allocPrint(bfa, format, args); + defer bfa.free(header_path); find.comp.normalizePath(header_path); const source = comp.addSourceFromPathExtra(header_path, kind) catch |err| switch (err) { @@ -2068,14 +2070,15 @@ pub fn findEmbed( } } - var stack_fallback = std.heap.stackFallback(path_buf_stack_limit, comp.gpa); - const sf_allocator = stack_fallback.get(); + var bfa_buf: [path_buf_stack_limit]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, comp.gpa); + const bfa = bfa_state.allocator(); switch (include_type) { .quotes, .cli => { const dir = std.fs.path.dirname(comp.getSource(includer_token_source).path) orelse "."; - const path = try std.fs.path.join(sf_allocator, &.{ dir, filename }); - defer sf_allocator.free(path); + const path = try std.fs.path.join(bfa, &.{ dir, filename }); + defer bfa.free(path); comp.normalizePath(path); if (comp.getPathContents(path, limit)) |some| { errdefer comp.gpa.free(some); @@ -2089,8 +2092,8 @@ pub fn findEmbed( .angle_brackets => {}, } for (comp.embed_dirs.items) |embed_dir| { - const path = try std.fs.path.join(sf_allocator, &.{ embed_dir, filename }); - defer sf_allocator.free(path); + const path = try std.fs.path.join(bfa, &.{ embed_dir, filename }); + defer bfa.free(path); comp.normalizePath(path); if (comp.getPathContents(path, limit)) |some| { errdefer comp.gpa.free(some); diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index 051e084622..fc35e13d96 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -947,8 +947,9 @@ fn addImacros(d: *Driver, path: []const u8) !void { } pub fn err(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { - var sf = std.heap.stackFallback(1024, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, d.comp.gpa); + var allocating: 
std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -956,8 +957,9 @@ pub fn err(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { } pub fn warn(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { - var sf = std.heap.stackFallback(1024, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -1101,8 +1103,9 @@ fn parseTarget(d: *Driver, arch_os_abi: []const u8, opt_cpu_features: ?[]const u } pub fn fatal(d: *Driver, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } { - var sf = std.heap.stackFallback(1024, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index afdfbb0ab5..fad33316d2 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -215,8 +215,9 @@ fn checkIdentifierCodepointWarnings(p: *Parser, codepoint: u21, loc: Source.Loca assert(codepoint >= 0x80); const prev_total = p.diagnostics.total; - var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = 
.init(bfa.allocator()); defer allocating.deinit(); if (!char_info.isC99IdChar(codepoint)) { @@ -429,8 +430,9 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) if (diagnostic.suppress_unless_version) |some| if (!p.comp.langopts.standard.atLeast(some)) return; if (p.diagnostics.effectiveKind(diagnostic) == .off) return; - var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); p.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -1537,8 +1539,9 @@ fn staticAssert(p: *Parser) Error!bool { } } else { if (!res.val.toBool(p.comp)) { - var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); if (p.staticAssertMessage(res_node, str, &allocating) catch return error.OutOfMemory) |message| { @@ -4837,8 +4840,9 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, asm_tok: TokenIndex const expected_items = 8; // arbitrarily chosen, most assembly will have fewer than 8 inputs/outputs/constraints/names const bytes_needed = expected_items * @sizeOf(Tree.Node.AsmStmt.Operand) + expected_items * 2 * @sizeOf(Node.Index); - var stack_fallback = std.heap.stackFallback(bytes_needed, gpa); - const allocator = stack_fallback.get(); + var bfa_buf: [bytes_needed]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const allocator = bfa.allocator(); var operands: std.ArrayList(Tree.Node.AsmStmt.Operand) = .empty; defer operands.deinit(allocator); @@ -9922,8 +9926,9 @@ fn 
primaryExpr(p: *Parser) Error!?Result { if (p.func.pretty_ident) |some| { qt = some.qt; } else if (p.func.qt) |func_qt| { - var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); func_qt.printNamed(p.tokSlice(p.func.name), p.comp, &allocating.writer) catch return error.OutOfMemory; @@ -10212,8 +10217,9 @@ fn charLiteral(p: *Parser) Error!?Result { }; const max_chars_expected = 4; - var sf = std.heap.stackFallback(max_chars_expected * @sizeOf(u32), gpa); - const allocator = sf.get(); + var bfa_buf: [max_chars_expected]u32 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), gpa); + const allocator = bfa.allocator(); var chars: std.ArrayList(u32) = .empty; defer chars.deinit(allocator); diff --git a/lib/compiler/aro/aro/Pragma.zig b/lib/compiler/aro/aro/Pragma.zig index 1bb43e97fb..51a1df99b2 100644 --- a/lib/compiler/aro/aro/Pragma.zig +++ b/lib/compiler/aro/aro/Pragma.zig @@ -212,8 +212,9 @@ pub const Diagnostic = struct { }; pub fn err(pp: *Preprocessor, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) Compilation.Error!void { - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index 6c35ef3906..15ac3b0a1b 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -1023,8 +1023,9 @@ fn err(pp: 
*Preprocessor, loc: anytype, diagnostic: Diagnostic, args: anytype) C defer pp.diagnostics.state.suppress_system_headers = old_suppress_system; if (diagnostic.show_in_system_headers) pp.diagnostics.state.suppress_system_headers = false; - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -1052,8 +1053,9 @@ fn err(pp: *Preprocessor, loc: anytype, diagnostic: Diagnostic, args: anytype) C } fn fatal(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: anytype) Compilation.Error { - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -1074,8 +1076,9 @@ fn fatalNotFound(pp: *Preprocessor, tok: TokenWithExpansionLocs, filename: []con pp.diagnostics.state.fatal_errors = true; defer pp.diagnostics.state.fatal_errors = old; - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - const allocator = sf.get(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + const allocator = bfa.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/compiler/aro/aro/pragmas/message.zig b/lib/compiler/aro/aro/pragmas/message.zig index 11f5af5a9a..fa9e19f36e 100644 --- a/lib/compiler/aro/aro/pragmas/message.zig +++ b/lib/compiler/aro/aro/pragmas/message.zig @@ -44,8 
+44,9 @@ fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pra const diagnostic: Pragma.Diagnostic = .pragma_message; - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, .{str}) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/text_literal.zig b/lib/compiler/aro/aro/text_literal.zig index 3792b2cca9..73efc5d262 100644 --- a/lib/compiler/aro/aro/text_literal.zig +++ b/lib/compiler/aro/aro/text_literal.zig @@ -315,8 +315,9 @@ pub const Parser = struct { if (p.errored) return; if (p.comp.diagnostics.effectiveKind(diagnostic) == .off) return; - var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/assembly_backend/x86_64.zig b/lib/compiler/aro/assembly_backend/x86_64.zig index 065d8a1f2d..056db55dbe 100644 --- a/lib/compiler/aro/assembly_backend/x86_64.zig +++ b/lib/compiler/aro/assembly_backend/x86_64.zig @@ -68,8 +68,9 @@ fn serializeFloat(comptime T: type, value: T, w: *std.Io.Writer) !void { pub fn todo(c: *AsmCodeGen, msg: []const u8, tok: Tree.TokenIndex) Error { const loc: Source.Location = c.tree.tokens.items(.loc)[tok]; - var sf = std.heap.stackFallback(1024, c.comp.gpa); - const allocator = sf.get(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, c.comp.gpa); + const 
allocator = bfa.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 6eb0b3d673..fe664e1ba3 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1197,8 +1197,9 @@ fn printSourceAtAddress( // Initialize the symbol array with space for at least one element, allocating this on the stack // in the common case where only one element is needed - var symbol_fallback_allocator = std.heap.stackFallback(@sizeOf(Symbol) + @alignOf(Symbol) - 1, getDebugInfoAllocator()); - const symbol_allocator = symbol_fallback_allocator.get(); + var buf: [1]Symbol = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&buf), getDebugInfoAllocator()); + const symbol_allocator = bfa.allocator(); var symbols = std.ArrayList(Symbol).initCapacity(symbol_allocator, 1) catch unreachable; defer symbols.deinit(symbol_allocator); diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig index 3aff5bd9f1..40a8e9c3c4 100644 --- a/lib/std/fs/path.zig +++ b/lib/std/fs/path.zig @@ -894,8 +894,9 @@ pub fn resolve(allocator: Allocator, paths: []const []const u8) Allocator.Error! 
pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) Allocator.Error![]u8 { // Avoid heap allocation when paths.len is <= @bitSizeOf(usize) * 2 // (we use `* 3` because stackFallback uses 1 usize as a length) - var bit_set_allocator_state = std.heap.stackFallback(@sizeOf(usize) * 3, allocator); - const bit_set_allocator = bit_set_allocator_state.get(); + var buf: [3]usize = undefined; + var bit_set_allocator_state: std.heap.BufferFirstAllocator = .init(@ptrCast(&buf), allocator); + const bit_set_allocator = bit_set_allocator_state.allocator(); var relevant_paths = try std.bit_set.DynamicBitSetUnmanaged.initEmpty(bit_set_allocator, paths.len); defer relevant_paths.deinit(bit_set_allocator); @@ -1642,7 +1643,8 @@ fn windowsResolveAgainstCwd( parsed: WindowsPath2(u8), ) ![]u8 { // Space for 256 WTF-16 code units; potentially 3 WTF-8 bytes per WTF-16 code unit - var temp_allocator_state = std.heap.stackFallback(256 * 3, gpa); + var buf: [256 * 3]u8 = undefined; + var temp_allocator_state: std.heap.BufferFirstAllocator = .init(&buf, gpa); return switch (parsed.kind) { .drive_absolute, .unc_absolute, @@ -1668,7 +1670,7 @@ fn windowsResolveAgainstCwd( } }, .drive_relative => blk: { - const temp_allocator = temp_allocator_state.get(); + const temp_allocator = temp_allocator_state.allocator(); const drive_cwd = drive_cwd: { const parsed_cwd = parsePathWindows(u8, cwd); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 9bf1aeffbb..8e3d81d26d 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -12,6 +12,7 @@ const Alignment = std.mem.Alignment; pub const ArenaAllocator = @import("heap/ArenaAllocator.zig"); pub const SmpAllocator = @import("heap/SmpAllocator.zig"); pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig"); +pub const BufferFirstAllocator = @import("heap/BufferFirstAllocator.zig"); pub const PageAllocator = @import("heap/PageAllocator.zig"); pub const WasmAllocator = if (builtin.single_threaded) BrkAllocator else 
@compileError("unimplemented"); pub const BrkAllocator = @import("heap/BrkAllocator.zig"); @@ -367,113 +368,6 @@ pub const brk_allocator: Allocator = .{ .vtable = &BrkAllocator.vtable, }; -/// Returns a `StackFallbackAllocator` allocating using either a -/// `FixedBufferAllocator` on an array of size `size` and falling back to -/// `fallback_allocator` if that fails. -pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) { - return StackFallbackAllocator(size){ - .buffer = undefined, - .fallback_allocator = fallback_allocator, - .fixed_buffer_allocator = undefined, - }; -} - -/// An allocator that attempts to allocate using a -/// `FixedBufferAllocator` using an array of size `size`. If the -/// allocation fails, it will fall back to using -/// `fallback_allocator`. Easily created with `stackFallback`. -pub fn StackFallbackAllocator(comptime size: usize) type { - return struct { - const Self = @This(); - - buffer: [size]u8, - fallback_allocator: Allocator, - fixed_buffer_allocator: FixedBufferAllocator, - get_called: if (std.debug.runtime_safety) bool else void = - if (std.debug.runtime_safety) false else {}, - - /// This function both fetches a `Allocator` interface to this - /// allocator *and* resets the internal buffer allocator. - pub fn get(self: *Self) Allocator { - if (std.debug.runtime_safety) { - assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();` - self.get_called = true; - } - self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]); - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .remap = remap, - .free = free, - }, - }; - } - - /// Unlike most std allocators `StackFallbackAllocator` modifies - /// its internal state before returning an implementation of - /// the`Allocator` interface and therefore also doesn't use - /// the usual `.allocator()` method. 
- pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead"); - - fn alloc( - ctx: *anyopaque, - len: usize, - alignment: Alignment, - ra: usize, - ) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(ctx)); - return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse - return self.fallback_allocator.rawAlloc(len, alignment, ra); - } - - fn resize( - ctx: *anyopaque, - buf: []u8, - alignment: Alignment, - new_len: usize, - ra: usize, - ) bool { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra); - } else { - return self.fallback_allocator.rawResize(buf, alignment, new_len, ra); - } - } - - fn remap( - context: *anyopaque, - memory: []u8, - alignment: Alignment, - new_len: usize, - return_address: usize, - ) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(context)); - if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) { - return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address); - } else { - return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address); - } - } - - fn free( - ctx: *anyopaque, - buf: []u8, - alignment: Alignment, - ra: usize, - ) void { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra); - } else { - return self.fallback_allocator.rawFree(buf, alignment, ra); - } - } - }; -} - test c_allocator { if (builtin.link_libc) { try testAllocator(c_allocator); @@ -524,25 +418,6 @@ test ArenaAllocator { try testAllocatorAlignedShrink(allocator); } -test "StackFallbackAllocator" { - { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocator(stack_allocator.get()); - } - { - var stack_allocator = 
stackFallback(4096, std.testing.allocator); - try testAllocatorAligned(stack_allocator.get()); - } - { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorLargeAlignment(stack_allocator.get()); - } - { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorAlignedShrink(stack_allocator.get()); - } -} - /// This one should not try alignments that exceed what C malloc can handle. pub fn testAllocator(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); @@ -1011,6 +886,7 @@ test { _ = ArenaAllocator; _ = DebugAllocator(.{}); _ = FixedBufferAllocator; + _ = BufferFirstAllocator; if (builtin.single_threaded) { if (builtin.cpu.arch.isWasm() or (builtin.os.tag == .linux and !builtin.link_libc)) { _ = brk_allocator; diff --git a/lib/std/heap/BufferFirstAllocator.zig b/lib/std/heap/BufferFirstAllocator.zig new file mode 100644 index 0000000000..f0b5e8880b --- /dev/null +++ b/lib/std/heap/BufferFirstAllocator.zig @@ -0,0 +1,165 @@ +//! An allocator that attempts to allocate from the given buffer, falling back to +//! `fallback_allocator` if this fails. 
+ +const std = @import("../std.zig"); +const heap = std.heap; +const testing = std.testing; + +const Alignment = std.mem.Alignment; +const Allocator = std.mem.Allocator; +const FixedBufferAllocator = std.heap.FixedBufferAllocator; + +const BufferFirstAllocator = @This(); + +fallback_allocator: Allocator, +fixed_buffer_allocator: FixedBufferAllocator, + +pub fn init(buffer: []u8, fallback_allocator: Allocator) BufferFirstAllocator { + return .{ + .fallback_allocator = fallback_allocator, + .fixed_buffer_allocator = .init(buffer), + }; +} + +pub fn allocator(self: *BufferFirstAllocator) Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .remap = remap, + .free = free, + }, + }; +} + +fn alloc( + ctx: *anyopaque, + len: usize, + alignment: Alignment, + ra: usize, +) ?[*]u8 { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(ctx)); + return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse + return self.fallback_allocator.rawAlloc(len, alignment, ra); +} + +fn resize( + ctx: *anyopaque, + buf: []u8, + alignment: Alignment, + new_len: usize, + ra: usize, +) bool { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra); + } else { + return self.fallback_allocator.rawResize(buf, alignment, new_len, ra); + } +} + +fn remap( + context: *anyopaque, + memory: []u8, + alignment: Alignment, + new_len: usize, + return_address: usize, +) ?[*]u8 { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(context)); + if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) { + return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address); + } else { + return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address); + } +} + +fn free( + ctx: *anyopaque, + buf: []u8, + alignment: 
Alignment, + ra: usize, +) void { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra); + } else { + return self.fallback_allocator.rawFree(buf, alignment, ra); + } +} + +test "BufferFirstAllocator" { + // Buffer first specific tests + { + var buffer: [10]u8 = undefined; + var bfa_state: BufferFirstAllocator = .init(&buffer, std.testing.allocator); + const bfa = bfa_state.allocator(); + + // We're under the limit, so we should be allocated in the buffer + const txt0 = "hellowrld"; + const buf0 = try bfa.create(@TypeOf(txt0.*)); + buf0.* = txt0.*; + try testing.expect(bfa_state.fixed_buffer_allocator.ownsPtr(buf0.ptr)); + + // We're now over the limit, so we should be allocated from the fallback + const txt1 = "test!"; + const buf1 = try bfa.create(@TypeOf(txt1.*)); + buf1.* = txt1.*; + try testing.expect(!bfa_state.fixed_buffer_allocator.ownsPtr(buf1.ptr)); + + // Free the allocation that took up space in the buffer + try testing.expectEqualStrings(txt0, buf0); + bfa.destroy(buf0); + + // The next allocation would go in the buffer, but it's too big so it doesn't + const txt2 = "qwertyqwerty"; + const buf2 = try bfa.create(@TypeOf(txt2.*)); + buf2.* = txt2.*; + try testing.expect(!bfa_state.fixed_buffer_allocator.ownsPtr(buf2.ptr)); + + // The next allocation is smaller and fits in the buffer + const txt3 = "dvorak"; + const buf3 = try bfa.create(@TypeOf(txt3.*)); + buf3.* = txt3.*; + try testing.expect(bfa_state.fixed_buffer_allocator.ownsPtr(buf3.ptr)); + + // The remainder in the buffer is too small for the following allocation so it falls back + const txt4 = "moretext"; + const buf4 = try bfa.create(@TypeOf(txt4.*)); + buf4.* = txt4.*; + try testing.expect(!bfa_state.fixed_buffer_allocator.ownsPtr(buf4.ptr)); + + // Check equality on the remaining buffers and free them + try testing.expectEqualStrings(txt1, buf1); 
+ bfa.destroy(buf1); + try testing.expectEqualStrings(txt2, buf2); + bfa.destroy(buf2); + try testing.expectEqualStrings(txt3, buf3); + bfa.destroy(buf3); + try testing.expectEqualStrings(txt4, buf4); + bfa.destroy(buf4); + + try testing.expectEqual(0, bfa_state.fixed_buffer_allocator.end_index); + } + + // Standard allocator tests + { + var buf: [4096]u8 = undefined; + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocator(bfa.allocator()); + } + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocatorAligned(bfa.allocator()); + } + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocatorLargeAlignment(bfa.allocator()); + } + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocatorAlignedShrink(bfa.allocator()); + } + } +} diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 2c5270adfe..c737aedda7 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -1776,11 +1776,12 @@ fn structInitExpr( } { - var sfba = std.heap.stackFallback(256, astgen.arena); - const sfba_allocator = sfba.get(); + var bfa_buf: [256]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, astgen.arena); + const bfa = bfa_state.allocator(); var duplicate_names: std.array_hash_map.Auto(Zir.NullTerminatedString, ArrayList(Ast.TokenIndex)) = .empty; - try duplicate_names.ensureTotalCapacity(sfba_allocator, @intCast(struct_init.ast.fields.len)); + try duplicate_names.ensureTotalCapacity(bfa, @intCast(struct_init.ast.fields.len)); // When there aren't errors, use this to avoid a second iteration. 
var any_duplicate = false; @@ -1789,14 +1790,14 @@ fn structInitExpr( const name_token = tree.firstToken(field) - 2; const name_index = try astgen.identAsString(name_token); - const gop = try duplicate_names.getOrPut(sfba_allocator, name_index); + const gop = try duplicate_names.getOrPut(bfa, name_index); if (gop.found_existing) { - try gop.value_ptr.append(sfba_allocator, name_token); + try gop.value_ptr.append(bfa, name_token); any_duplicate = true; } else { gop.value_ptr.* = .empty; - try gop.value_ptr.append(sfba_allocator, name_token); + try gop.value_ptr.append(bfa, name_token); } } @@ -8404,9 +8405,10 @@ fn tunnelThroughClosure( // Otherwise we need a tunnel. First, figure out the path of namespaces we // are tunneling through. This is usually only going to be one or two, so - // use an SFBA to optimize for the common case. + // use a BFA to optimize for the common case. + var bfa_buf: [2]usize = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), astgen.arena); + var intermediate_tunnels = try bfa.allocator().alloc(*Scope.Namespace, num_tunnels - 1); const root_ns = ns: { var i: usize = num_tunnels - 1; @@ -12926,17 +12928,18 @@ fn scanContainer( next: ?*@This(), }; - // The maps below are allocated into this BFA to avoid using the GPA for small namespaces. - var sfba_state = std.heap.stackFallback(512, astgen.gpa); - const sfba = sfba_state.get(); + // The maps below are allocated into this BFA to avoid using the GPA for small namespaces. 
+ var bfa_buf: [512]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, astgen.gpa); + const bfa = bfa_state.allocator(); var names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; var test_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; var decltest_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; defer { - names.deinit(sfba); - test_names.deinit(sfba); - decltest_names.deinit(sfba); + names.deinit(bfa); + test_names.deinit(bfa); + decltest_names.deinit(bfa); } var any_duplicates = false; @@ -13008,7 +13011,7 @@ fn scanContainer( else => {}, // unnamed test .string_literal => { const name = try astgen.strLitAsString(test_name_token); - const gop = try test_names.getOrPut(sfba, name.index); + const gop = try test_names.getOrPut(bfa, name.index); if (gop.found_existing) { var e = gop.value_ptr; while (e.next) |n| e = n; @@ -13021,7 +13024,7 @@ fn scanContainer( }, .identifier => { const name = try astgen.identAsString(test_name_token); - const gop = try decltest_names.getOrPut(sfba, name); + const gop = try decltest_names.getOrPut(bfa, name); if (gop.found_existing) { var e = gop.value_ptr; while (e.next) |n| e = n; @@ -13048,7 +13051,7 @@ fn scanContainer( } { - const gop = try names.getOrPut(sfba, name_str_index); + const gop = try names.getOrPut(bfa, name_str_index); const new_ent: NameEntry = .{ .tok = name_token, .next = null, diff --git a/lib/std/zig/ZonGen.zig b/lib/std/zig/ZonGen.zig index 4d4cfcad9f..5326f71bea 100644 --- a/lib/std/zig/ZonGen.zig +++ b/lib/std/zig/ZonGen.zig @@ -427,10 +427,11 @@ fn expr(zg: *ZonGen, node: Ast.Node.Index, dest_node: Zoir.Node.Index) Allocator }); // For short initializers, track the names on the stack rather than going through gpa. 
- var sfba_state = std.heap.stackFallback(256, gpa); - const sfba = sfba_state.get(); + var bfa_buf: [256]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const bfa = bfa_state.allocator(); var field_names: std.AutoHashMapUnmanaged(Zoir.NullTerminatedString, Ast.TokenIndex) = .empty; - defer field_names.deinit(sfba); + defer field_names.deinit(bfa); var reported_any_duplicate = false; @@ -438,7 +439,7 @@ fn expr(zg: *ZonGen, node: Ast.Node.Index, dest_node: Zoir.Node.Index) Allocator const name_token = tree.firstToken(elem_node) - 2; if (zg.identAsString(name_token)) |name_str| { zg.extra.items[extra_name_idx] = @intFromEnum(name_str); - const gop = try field_names.getOrPut(sfba, name_str); + const gop = try field_names.getOrPut(bfa, name_str); if (gop.found_existing and !reported_any_duplicate) { reported_any_duplicate = true; const earlier_token = gop.value_ptr.*; diff --git a/lib/std/zig/llvm/Builder.zig b/lib/std/zig/llvm/Builder.zig index 276012b09a..d4316d45a0 100644 --- a/lib/std/zig/llvm/Builder.zig +++ b/lib/std/zig/llvm/Builder.zig @@ -7638,9 +7638,9 @@ pub const Constant = enum(u32) { std.math.big.int.calcToStringLimbsBufferLen(expected_limbs, 10) ]std.math.big.Limb, }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), data.builder.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), data.builder.gpa); + const allocator = bfa.allocator(); const str = bigint.toStringAlloc(allocator, 10, undefined) catch return error.WriteFailed; defer allocator.free(str); try w.writeAll(str); @@ -9209,9 +9209,9 @@ pub fn getIntrinsic( fields: [expected_fields_len]Type, }, }; - var stack align(@max(@alignOf(std.heap.StackFallbackAllocator(0)), @alignOf(ExpectedContents))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var bfa_buf: 
ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const name = name: { { @@ -10607,9 +10607,9 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void std.math.big.int.calcToStringLimbsBufferLen(expected_limbs, 10) ]std.math.big.Limb, }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const limbs = self.metadata_limbs.items[extra.limbs_index..][0..extra.limbs_len]; const bigint: std.math.big.int.Const = .{ @@ -11129,9 +11129,9 @@ fn bigIntConstAssumeCapacity( const bits = type_item.data; const ExpectedContents = [64 / @sizeOf(std.math.big.Limb)]std.math.big.Limb; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); var limbs: []std.math.big.Limb = &.{}; defer allocator.free(limbs); diff --git a/src/Air/Legalize.zig b/src/Air/Legalize.zig index 333ec53424..1eeed71bd3 100644 --- a/src/Air/Legalize.zig +++ b/src/Air/Legalize.zig @@ -1122,14 +1122,15 @@ fn scalarizeShuffleOneBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro // // So we must first compute `out_idxs` and `in_idxs`. 
- var sfba_state = std.heap.stackFallback(512, gpa); - const sfba = sfba_state.get(); + var bfa_buf: [512]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const bfa = bfa_state.allocator(); - const out_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(out_idxs_buf); + const out_idxs_buf = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(out_idxs_buf); - const in_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(in_idxs_buf); + const in_idxs_buf = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(in_idxs_buf); var n: usize = 0; for (shuffle.mask, 0..) |mask, out_idx| switch (mask.unwrap()) { @@ -1143,8 +1144,8 @@ fn scalarizeShuffleOneBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro const init_val: Value = init: { const undef_val = try pt.undefValue(shuffle.result_ty.childType(zcu)); - const elems = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(elems); + const elems = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(elems); for (shuffle.mask, elems) |mask, *elem| elem.* = switch (mask.unwrap()) { .value => |ip_index| ip_index, .elem => undef_val.toIntern(), @@ -1212,14 +1213,15 @@ fn scalarizeShuffleTwoBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro // %8 = br(%1, %7) // }) - var sfba_state = std.heap.stackFallback(512, gpa); - const sfba = sfba_state.get(); + var bfa_buf: [512]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const bfa = bfa_state.allocator(); - const out_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(out_idxs_buf); + const out_idxs_buf = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(out_idxs_buf); - const in_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(in_idxs_buf); + const in_idxs_buf = try bfa.alloc(InternPool.Index, 
shuffle.mask.len); + defer bfa.free(in_idxs_buf); // Iterate `shuffle.mask` before doing anything, because modifying AIR invalidates it. const out_idxs_a, const in_idxs_a, const out_idxs_b, const in_idxs_b = idxs: { @@ -2394,9 +2396,9 @@ fn packedStoreBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Error!Air.In }).toRef(), .rhs = Air.internedToRef((keep_mask: { const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), zcu.gpa); - const gpa = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), zcu.gpa); + const gpa = bfa.allocator(); var mask_big_int: std.math.big.int.Mutable = .{ .limbs = try gpa.alloc( @@ -2489,11 +2491,12 @@ fn packedAggregateInitBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro const agg_ty = orig_ty_pl.ty.toType(); const agg_field_count = agg_ty.structFieldCount(zcu); - var sfba_state = std.heap.stackFallback(@sizeOf([4 * 32 + 2]Air.Inst.Index), gpa); - const sfba = sfba_state.get(); + var bfa_buf: [4 * 32 + 2]Air.Inst.Index = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), gpa); + const bfa = bfa_state.allocator(); - const inst_buf = try sfba.alloc(Air.Inst.Index, 4 * agg_field_count + 2); - defer sfba.free(inst_buf); + const inst_buf = try bfa.alloc(Air.Inst.Index, 4 * agg_field_count + 2); + defer bfa.free(inst_buf); var main_block: Block = .init(inst_buf); try l.air_instructions.ensureUnusedCapacity(gpa, inst_buf.len); diff --git a/src/Value.zig b/src/Value.zig index cc0e577f84..2f56c6301e 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -882,15 +882,16 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value { else => unreachable, }; // Avoid hitting gpa for accesses to small packed structs - var sfba_state = 
std.heap.stackFallback(128, zcu.comp.gpa); - const sfba = sfba_state.get(); - const buf = try sfba.alloc(u8, @intCast((ty.bitSize(zcu) + 7) / 8)); - defer sfba.free(buf); + var bfa_buf: [128]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, zcu.comp.gpa); + const bfa = bfa_state.allocator(); + const buf = try bfa.alloc(u8, @intCast((ty.bitSize(zcu) + 7) / 8)); + defer bfa.free(buf); int_val.writeToPackedMemory(zcu, buf, 0) catch |err| switch (err) { error.ReinterpretDeclRef => unreachable, // it's an integer error.OutOfMemory => |e| return e, }; - return Value.readFromPackedMemory(field_ty, pt, buf, field_bit_offset, sfba) catch |err| switch (err) { + return Value.readFromPackedMemory(field_ty, pt, buf, field_bit_offset, bfa) catch |err| switch (err) { error.IllDefinedMemoryLayout => unreachable, // it's a bitpack error.OutOfMemory => |e| return e, }; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 7641790aae..519a4f0bb7 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4841,8 +4841,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { { const asm_source = unwrapped_asm.source; - var stack = std.heap.stackFallback(256, f.dg.gpa); - const allocator = stack.get(); + var bfa_buf: [256]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, f.dg.gpa); + const allocator = bfa.allocator(); const fixed_asm_source = try allocator.alloc(u8, asm_source.len); defer allocator.free(fixed_asm_source); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ef51f3ba90..31cc88b6a2 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3605,11 +3605,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var 
bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, elems.len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, elems.len); @@ -3636,11 +3634,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, len_including_sentinel); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, len_including_sentinel); @@ -3668,11 +3664,9 @@ pub const Object = struct { switch (aggregate.storage) { .bytes, .elems => { const ExpectedContents = [Builder.expected_fields_len]Builder.Constant; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, vector_type.len); defer allocator.free(vals); @@ -3701,11 +3695,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var 
bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, llvm_len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, llvm_len); @@ -3779,11 +3771,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, llvm_len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, llvm_len); diff --git a/src/codegen/llvm/FuncGen.zig b/src/codegen/llvm/FuncGen.zig index 23a0d01a86..37b0df3d85 100644 --- a/src/codegen/llvm/FuncGen.zig +++ b/src/codegen/llvm/FuncGen.zig @@ -3530,11 +3530,9 @@ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) const inst_llvm_ty = try o.lowerType(inst_ty); const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const scalar_bits = scalar_ty.intInfo(zcu).bits; var smin_big_int: std.math.big.int.Mutable = .{ @@ -3616,11 +3614,9 @@ fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) Allo } if (scalar_ty.isSignedInt(zcu)) { const 
ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const scalar_bits = scalar_ty.intInfo(zcu).bits; var smin_big_int: std.math.big.int.Mutable = .{ diff --git a/src/codegen/riscv64/CodeGen.zig b/src/codegen/riscv64/CodeGen.zig index cea33b7c66..6b65704a4b 100644 --- a/src/codegen/riscv64/CodeGen.zig +++ b/src/codegen/riscv64/CodeGen.zig @@ -671,11 +671,12 @@ fn restoreState(func: *Func, state: State, deaths: []const Air.Inst.Index, compt for (deaths) |death| try func.processDeath(death); const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - if (opts.update_tracking) {} else std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa); + const stack_buf_len = if (opts.update_tracking) 0 else 1; + var bfa_buf: [stack_buf_len]ExpectedContents = undefined; + var bfa = if (opts.update_tracking) {} else std.heap.BufferFirstAllocator.init(@ptrCast(&bfa_buf), func.gpa); var reg_locks = if (opts.update_tracking) {} else try std.array_list.Managed(RegisterLock).initCapacity( - stack.get(), + bfa.allocator(), @typeInfo(ExpectedContents).array.len, ); defer if (!opts.update_tracking) { @@ -4807,9 +4808,9 @@ fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ExpectedContents = extern struct { vals: [expected_num_args][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), 
func.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), func.gpa); + const allocator = bfa.allocator(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); diff --git a/src/codegen/x86_64/CodeGen.zig b/src/codegen/x86_64/CodeGen.zig index 452d7e2136..d939047933 100644 --- a/src/codegen/x86_64/CodeGen.zig +++ b/src/codegen/x86_64/CodeGen.zig @@ -173820,9 +173820,9 @@ fn genLazy(cg: *CodeGen, lazy_sym: link.File.LazySymbol) InnerError!void { var err_temp = try cg.tempInit(err_ty, err_mcv); const ExpectedContents = [32]Mir.Inst.Index; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = bfa.allocator(); const relocs = try allocator.alloc(Mir.Inst.Index, error_set_type.names.len); defer allocator.free(relocs); @@ -174220,11 +174220,12 @@ fn restoreState(self: *CodeGen, state: State, deaths: []const Air.Inst.Index, co for (deaths) |death| try self.processDeath(death, .{ .emit_instructions = opts.emit_instructions }); const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - if (opts.update_tracking) {} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const bfa_buf_len = if (opts.update_tracking) 0 else 1; + var bfa_buf: [bfa_buf_len]ExpectedContents = undefined; + var stack = if (opts.update_tracking) {} else std.heap.BufferFirstAllocator.init(@ptrCast(&bfa_buf), self.gpa); var reg_locks = if (opts.update_tracking) {} else try std.array_list.Managed(RegisterLock).initCapacity( - stack.get(), + 
stack.allocator(), @typeInfo(ExpectedContents).array.len, ); defer if (!opts.update_tracking) { @@ -175929,9 +175930,9 @@ fn airCall(self: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif tys: [32][@sizeOf(Type)]u8 align(@alignOf(Type)), vals: [32][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var bfa_buf: [1]ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); @@ -175985,9 +175986,9 @@ fn genCall(self: *CodeGen, info: union(enum) { frame_indices: [32]FrameIndex, reg_locks: [32][@sizeOf(?RegisterLock)]u8 align(@alignOf(?RegisterLock)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const var_args = try allocator.alloc(Type, args.len - fn_info.param_types.len); defer allocator.free(var_args); @@ -176588,9 +176589,9 @@ fn lowerSwitchBr( bigint_limbs: [std.math.big.int.calcTwosCompLimbCount(1 << 10)]std.math.big.Limb, relocs: [1 << 6]Mir.Inst.Index, }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = bfa.allocator(); const state = try cg.saveState(); @@ -181154,9 +181155,9 @@ fn 
resolveCallingConventionValues( const ExpectedContents = extern struct { param_types: [32][@sizeOf(Type)]u8 align(@alignOf(Type)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = bfa.allocator(); const param_types = try allocator.alloc(Type, fn_info.param_types.len + var_args.len); defer allocator.free(param_types); @@ -188706,9 +188707,9 @@ const Select = struct { } const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(1 << 10)]std.math.big.Limb; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = bfa.allocator(); var res_big_int: std.math.big.int.Mutable = .{ .limbs = try allocator.alloc( std.math.big.Limb, diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index 4ec3598c34..fe13b20f46 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -2690,8 +2690,9 @@ pub fn ensureUnusedRelocCapacity(elf: *Elf, loc_si: Symbol.Index, len: usize) !v const shndx = loc_si.shndx(elf); const sh = shndx.get(elf); if (sh.rela_si == .null) { - var stack = std.heap.stackFallback(32, gpa); - const allocator = stack.get(); + var bfa_buf: [32]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const allocator = bfa.allocator(); const rela_name = try std.fmt.allocPrint(allocator, ".rela{s}", .{elf.sectionName(sh.si)});