From 73ecc6333fcc57dce51a5ca93e0740fe6a3346cf Mon Sep 17 00:00:00 2001 From: jmcaine Date: Fri, 10 Apr 2026 09:06:09 -0700 Subject: [PATCH 1/5] std: implement heap.StackFirstAllocator second attempt --- lib/std/heap.zig | 1 + lib/std/heap/StackFirstAllocator.zig | 123 +++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 lib/std/heap/StackFirstAllocator.zig diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 9bf1aeffbb..1b979fd024 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -12,6 +12,7 @@ const Alignment = std.mem.Alignment; pub const ArenaAllocator = @import("heap/ArenaAllocator.zig"); pub const SmpAllocator = @import("heap/SmpAllocator.zig"); pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig"); +pub const StackFirstAllocator = @import("heap/StackFirstAllocator.zig"); pub const PageAllocator = @import("heap/PageAllocator.zig"); pub const WasmAllocator = if (builtin.single_threaded) BrkAllocator else @compileError("unimplemented"); pub const BrkAllocator = @import("heap/BrkAllocator.zig"); diff --git a/lib/std/heap/StackFirstAllocator.zig b/lib/std/heap/StackFirstAllocator.zig new file mode 100644 index 0000000000..9b3e7a2d08 --- /dev/null +++ b/lib/std/heap/StackFirstAllocator.zig @@ -0,0 +1,123 @@ +//! A "composite" allocator that attempts to allocate first on the stack, using +//! the provided FixedBufferAllocator; upon failure, the provided secondary +//! allocator is used. reset() is NOT provided, even though available for the +//! (primary) FixedBufferAllocator, because it may not be available for the +//! provided secondary allocator (so callers must call reset() on underlying +//! allocators, themselves, when desirable). 
+ +const std = @import("../std.zig"); +const mem = std.mem; +const Allocator = mem.Allocator; +const FixedBufferAllocator = std.heap.FixedBufferAllocator; +const assert = std.debug.assert; + +const StackFirstAllocator = @This(); + +primary: FixedBufferAllocator, +secondary: Allocator, + +pub fn init(buffer: []u8, secondary_allocator: Allocator) StackFirstAllocator { + return .{ + .primary = .init(buffer), + .secondary = secondary_allocator, + }; +} + +pub fn allocator(self: *StackFirstAllocator) Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .remap = remap, + .free = free, + }, + }; +} + +pub fn alloc(ctx: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 { + const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); + return FixedBufferAllocator.alloc(&self.primary, len, alignment, ret_addr) orelse + self.secondary.rawAlloc(len, alignment, ret_addr); +} + +pub fn resize(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) bool { + const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); + return if (self.primary.ownsPtr(memory.ptr)) + FixedBufferAllocator.resize(&self.primary, memory, alignment, new_len, ret_addr) + else + self.secondary.rawResize(memory, alignment, new_len, ret_addr); +} + +pub fn remap(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 { + const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); + return if (self.primary.ownsPtr(memory.ptr)) + FixedBufferAllocator.remap(&self.primary, memory, alignment, new_len, ret_addr) + else + self.secondary.rawRemap(memory, alignment, new_len, ret_addr); +} + +pub fn free(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, ret_addr: usize) void { + const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); + return if (self.primary.ownsPtr(memory.ptr)) + FixedBufferAllocator.free(&self.primary, memory, alignment, ret_addr) + else + 
self.secondary.rawFree(memory, alignment, ret_addr); +} + +test StackFirstAllocator { + var arena = std.heap.ArenaAllocator.init(std.testing.allocator); + defer arena.deinit(); + var buffer: [10]u8 = undefined; + var sfa = StackFirstAllocator.init(&buffer, arena.allocator()); + + const expect = std.testing.expect; + const expectEqualStrings = std.testing.expectEqualStrings; + + const al = sfa.allocator(); + const txt = "0123456789"; + + const dest = try al.alloc(u8, txt.len); + @memcpy(dest, txt); + try expectEqualStrings(txt, dest); + try expect(sfa.primary.ownsPtr(dest.ptr)); + + const txt2 = "abcde"; + const dest2 = try al.alloc(u8, txt2.len); + @memcpy(dest2, txt2); + try expectEqualStrings(txt2, dest2); + try expect(!sfa.primary.ownsPtr(dest2.ptr)); + + sfa.primary.reset(); + + const txt3 = "0123"; + const dest3 = try al.alloc(u8, txt3.len); + @memcpy(dest3, txt3); + try expectEqualStrings(txt3, dest3); + try expect(sfa.primary.ownsPtr(dest3.ptr)); + + sfa.primary.reset(); + //arena.reset(); // unnecessary, but allowed (note `defer arena.deinit()` above) + + // stock tests: + { + var buf: [16]u8 = undefined; + var a = StackFirstAllocator.init(&buf, std.testing.allocator); + try std.heap.testAllocator(a.allocator()); + } + { + var buf: [16]u8 = undefined; + var a = StackFirstAllocator.init(&buf, std.testing.allocator); + try std.heap.testAllocatorAligned(a.allocator()); + } + { + var buf: [16]u8 = undefined; + var a = StackFirstAllocator.init(&buf, std.testing.allocator); + try std.heap.testAllocatorLargeAlignment(a.allocator()); + } + { + var buf: [16]u8 = undefined; + var a = StackFirstAllocator.init(&buf, std.testing.allocator); + try std.heap.testAllocatorAlignedShrink(a.allocator()); + } +} From a4d8e9608e0cb2704bad00b2f87af078c6d4ed59 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 13 Apr 2026 02:55:07 -0700 Subject: [PATCH 2/5] Reworks the stack fallback allocator The previous approach had a few downsides: 1. 
You couldn't set the alignment of the internal buffer. Many callers in the standard library trying to use this for a small vec style optimization worked around this by setting the alignment for the struct itself, this ends up very verbose and also assumes a specific layout for the struct which isn't guaranteed. 2. It was generic over the size of the buffer. This type is used a lot in std with various sizes. 3. It has an awkward API where you had to call get which mutated the type unlike all other allocators, and then had a runtime check to make sure you didn't get this wrong. The new approach resolves all of these issues by just taking the buf as an argument. This is particularly amenable to smallvec style optimizations: you can just declare the buf as an array of the item you want to allocate to get the exact minimum size. --- lib/std/heap.zig | 191 +++++++++++++++++++++-------------------------- 1 file changed, 84 insertions(+), 107 deletions(-) diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 1b979fd024..1e234ac4a2 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -368,112 +368,88 @@ pub const brk_allocator: Allocator = .{ .vtable = &BrkAllocator.vtable, }; -/// Returns a `StackFallbackAllocator` allocating using either a -/// `FixedBufferAllocator` on an array of size `size` and falling back to -/// `fallback_allocator` if that fails. -pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) { - return StackFallbackAllocator(size){ - .buffer = undefined, - .fallback_allocator = fallback_allocator, - .fixed_buffer_allocator = undefined, - }; -} +/// An allocator that attempts to allocate from the given buffer, falling back to +/// `fallback_allocator` if this fails. +pub const StackFallbackAllocator = struct { + const Self = @This(); -/// An allocator that attempts to allocate using a -/// `FixedBufferAllocator` using an array of size `size`. 
If the -/// allocation fails, it will fall back to using -/// `fallback_allocator`. Easily created with `stackFallback`. -pub fn StackFallbackAllocator(comptime size: usize) type { - return struct { - const Self = @This(); + fallback_allocator: Allocator, + fixed_buffer_allocator: FixedBufferAllocator, - buffer: [size]u8, - fallback_allocator: Allocator, - fixed_buffer_allocator: FixedBufferAllocator, - get_called: if (std.debug.runtime_safety) bool else void = - if (std.debug.runtime_safety) false else {}, + pub fn init(buf: []u8, fallback_allocator: Allocator) Self { + return .{ + .fallback_allocator = fallback_allocator, + .fixed_buffer_allocator = .init(buf), + }; + } - /// This function both fetches a `Allocator` interface to this - /// allocator *and* resets the internal buffer allocator. - pub fn get(self: *Self) Allocator { - if (std.debug.runtime_safety) { - assert(!self.get_called); // `get` called multiple times; instead use `const allocator = stackFallback(N).get();` - self.get_called = true; - } - self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]); - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .remap = remap, - .free = free, - }, - }; + pub fn allocator(self: *Self) Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .remap = remap, + .free = free, + }, + }; + } + + fn alloc( + ctx: *anyopaque, + len: usize, + alignment: Alignment, + ra: usize, + ) ?[*]u8 { + const self: *Self = @ptrCast(@alignCast(ctx)); + return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse + return self.fallback_allocator.rawAlloc(len, alignment, ra); + } + + fn resize( + ctx: *anyopaque, + buf: []u8, + alignment: Alignment, + new_len: usize, + ra: usize, + ) bool { + const self: *Self = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, 
alignment, new_len, ra); + } else { + return self.fallback_allocator.rawResize(buf, alignment, new_len, ra); } + } - /// Unlike most std allocators `StackFallbackAllocator` modifies - /// its internal state before returning an implementation of - /// the`Allocator` interface and therefore also doesn't use - /// the usual `.allocator()` method. - pub const allocator = @compileError("use 'const allocator = stackFallback(N).get();' instead"); - - fn alloc( - ctx: *anyopaque, - len: usize, - alignment: Alignment, - ra: usize, - ) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(ctx)); - return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse - return self.fallback_allocator.rawAlloc(len, alignment, ra); + fn remap( + context: *anyopaque, + memory: []u8, + alignment: Alignment, + new_len: usize, + return_address: usize, + ) ?[*]u8 { + const self: *Self = @ptrCast(@alignCast(context)); + if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) { + return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address); + } else { + return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address); } + } - fn resize( - ctx: *anyopaque, - buf: []u8, - alignment: Alignment, - new_len: usize, - ra: usize, - ) bool { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra); - } else { - return self.fallback_allocator.rawResize(buf, alignment, new_len, ra); - } + fn free( + ctx: *anyopaque, + buf: []u8, + alignment: Alignment, + ra: usize, + ) void { + const self: *Self = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra); + } else { + return self.fallback_allocator.rawFree(buf, alignment, ra); } - - fn remap( - context: *anyopaque, - 
memory: []u8, - alignment: Alignment, - new_len: usize, - return_address: usize, - ) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(context)); - if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) { - return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address); - } else { - return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address); - } - } - - fn free( - ctx: *anyopaque, - buf: []u8, - alignment: Alignment, - ra: usize, - ) void { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra); - } else { - return self.fallback_allocator.rawFree(buf, alignment, ra); - } - } - }; -} + } +}; test c_allocator { if (builtin.link_libc) { @@ -525,22 +501,23 @@ test ArenaAllocator { try testAllocatorAlignedShrink(allocator); } -test "StackFallbackAllocator" { +test StackFallbackAllocator { + var buf: [4096]u8 = undefined; { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocator(stack_allocator.get()); + var stack_allocator: StackFallbackAllocator = .init(&buf, std.testing.allocator); + try testAllocator(stack_allocator.allocator()); } { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorAligned(stack_allocator.get()); + var stack_allocator: StackFallbackAllocator = .init(&buf, std.testing.allocator); + try testAllocatorAligned(stack_allocator.allocator()); } { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorLargeAlignment(stack_allocator.get()); + var stack_allocator: StackFallbackAllocator = .init(&buf, std.testing.allocator); + try testAllocatorLargeAlignment(stack_allocator.allocator()); } { - var stack_allocator = stackFallback(4096, std.testing.allocator); - try testAllocatorAlignedShrink(stack_allocator.get()); + var stack_allocator: StackFallbackAllocator = 
.init(&buf, std.testing.allocator); + try testAllocatorAlignedShrink(stack_allocator.allocator()); } } From 8c96487bb95e45a86f35e7cfa6012c265c0fdcca Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Mon, 13 Apr 2026 02:59:29 -0700 Subject: [PATCH 3/5] Updates all uses of StackFallbackAllocator --- lib/compiler/aro/aro/CodeGen.zig | 5 ++- lib/compiler/aro/aro/Compilation.zig | 15 ++++--- lib/compiler/aro/aro/Driver.zig | 15 ++++--- lib/compiler/aro/aro/Parser.zig | 30 ++++++++------ lib/compiler/aro/aro/Pragma.zig | 5 ++- lib/compiler/aro/aro/Preprocessor.zig | 15 ++++--- lib/compiler/aro/aro/pragmas/message.zig | 5 ++- lib/compiler/aro/aro/text_literal.zig | 5 ++- lib/compiler/aro/assembly_backend/x86_64.zig | 5 ++- lib/std/fs/path.zig | 10 +++-- lib/std/zig/AstGen.zig | 15 ++++--- lib/std/zig/ZonGen.zig | 5 ++- lib/std/zig/llvm/Builder.zig | 24 +++++------ src/Air/Legalize.zig | 21 ++++++---- src/Value.zig | 5 ++- src/codegen/c.zig | 5 ++- src/codegen/llvm.zig | 40 +++++++----------- src/codegen/llvm/FuncGen.zig | 16 +++----- src/codegen/riscv64/CodeGen.zig | 13 +++--- src/codegen/x86_64/CodeGen.zig | 43 ++++++++++---------- src/link/Elf2.zig | 5 ++- 21 files changed, 161 insertions(+), 141 deletions(-) diff --git a/lib/compiler/aro/aro/CodeGen.zig b/lib/compiler/aro/aro/CodeGen.zig index a746e6357b..ecccab3c1e 100644 --- a/lib/compiler/aro/aro/CodeGen.zig +++ b/lib/compiler/aro/aro/CodeGen.zig @@ -54,8 +54,9 @@ return_label: Ir.Ref = undefined, compound_assign_dummy: ?Ir.Ref = null, fn fail(c: *CodeGen, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } { - var sf = std.heap.stackFallback(1024, c.comp.gpa); - const allocator = sf.get(); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, c.comp.gpa); + const allocator = sf.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index
94fc6ead72..2b6a0f39a8 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -1761,8 +1761,9 @@ fn addToSearchPath(comp: *Compilation, include: Include, verbose: bool) !void { try comp.search_path.append(comp.gpa, include); } fn removeDuplicateSearchPaths(comp: *Compilation, start: usize, verbose: bool) !void { - var sf = std.heap.stackFallback(1024, comp.gpa); - const allocator = sf.get(); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, comp.gpa); + const allocator = sf.allocator(); var seen_includes: std.StringHashMapUnmanaged(void) = .empty; defer seen_includes.deinit(allocator); var seen_frameworks: std.StringHashMapUnmanaged(void) = .empty; @@ -1976,8 +1977,9 @@ const FindInclude = struct { ) Allocator.Error!?Result { const comp = find.comp; - var stack_fallback = std.heap.stackFallback(path_buf_stack_limit, comp.gpa); - const sfa = stack_fallback.get(); + var stack_fallback_buf: [path_buf_stack_limit]u8 = undefined; + var stack_fallback: std.heap.StackFallbackAllocator = .init(&stack_fallback_buf, comp.gpa); + const sfa = stack_fallback.allocator(); const header_path = try std.fmt.allocPrint(sfa, format, args); defer sfa.free(header_path); find.comp.normalizePath(header_path); @@ -2068,8 +2070,9 @@ pub fn findEmbed( } } - var stack_fallback = std.heap.stackFallback(path_buf_stack_limit, comp.gpa); - const sf_allocator = stack_fallback.get(); + var stack_fallback_buf: [path_buf_stack_limit]u8 = undefined; + var stack_fallback: std.heap.StackFallbackAllocator = .init(&stack_fallback_buf, comp.gpa); + const sf_allocator = stack_fallback.allocator(); switch (include_type) { .quotes, .cli => { diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index 051e084622..a72837b7fc 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -947,8 +947,9 @@ fn addImacros(d: *Driver, path: []const u8) !void { } pub fn err(d: *Driver, 
fmt: []const u8, args: anytype) Compilation.Error!void { - var sf = std.heap.stackFallback(1024, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -956,8 +957,9 @@ pub fn err(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { } pub fn warn(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { - var sf = std.heap.stackFallback(1024, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -1101,8 +1103,9 @@ fn parseTarget(d: *Driver, arch_os_abi: []const u8, opt_cpu_features: ?[]const u } pub fn fatal(d: *Driver, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } { - var sf = std.heap.stackFallback(1024, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index afdfbb0ab5..879140fff0 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -215,8 +215,9 @@ fn checkIdentifierCodepointWarnings(p: *Parser, codepoint: u21, loc: Source.Loca assert(codepoint >= 0x80); const prev_total = 
p.diagnostics.total; - var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); if (!char_info.isC99IdChar(codepoint)) { @@ -429,8 +430,9 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) if (diagnostic.suppress_unless_version) |some| if (!p.comp.langopts.standard.atLeast(some)) return; if (p.diagnostics.effectiveKind(diagnostic) == .off) return; - var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); p.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -1537,8 +1539,9 @@ fn staticAssert(p: *Parser) Error!bool { } } else { if (!res.val.toBool(p.comp)) { - var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); if (p.staticAssertMessage(res_node, str, &allocating) catch return error.OutOfMemory) |message| { @@ -4837,8 +4840,9 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, asm_tok: TokenIndex const expected_items = 8; // arbitrarily chosen, most assembly will have fewer than 8 inputs/outputs/constraints/names const bytes_needed = expected_items * @sizeOf(Tree.Node.AsmStmt.Operand) + expected_items * 2 * @sizeOf(Node.Index); - var stack_fallback = std.heap.stackFallback(bytes_needed, gpa); - const allocator = 
stack_fallback.get(); + var stack_fallback_buf: [bytes_needed]u8 = undefined; + var stack_fallback: std.heap.StackFallbackAllocator = .init(&stack_fallback_buf, gpa); + const allocator = stack_fallback.allocator(); var operands: std.ArrayList(Tree.Node.AsmStmt.Operand) = .empty; defer operands.deinit(allocator); @@ -9922,8 +9926,9 @@ fn primaryExpr(p: *Parser) Error!?Result { if (p.func.pretty_ident) |some| { qt = some.qt; } else if (p.func.qt) |func_qt| { - var sf = std.heap.stackFallback(1024, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); func_qt.printNamed(p.tokSlice(p.func.name), p.comp, &allocating.writer) catch return error.OutOfMemory; @@ -10212,8 +10217,9 @@ fn charLiteral(p: *Parser) Error!?Result { }; const max_chars_expected = 4; - var sf = std.heap.stackFallback(max_chars_expected * @sizeOf(u32), gpa); - const allocator = sf.get(); + var sf_buf: [max_chars_expected]u32 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(@ptrCast(&sf_buf), gpa); + const allocator = sf.allocator(); var chars: std.ArrayList(u32) = .empty; defer chars.deinit(allocator); diff --git a/lib/compiler/aro/aro/Pragma.zig b/lib/compiler/aro/aro/Pragma.zig index 1bb43e97fb..dc8511482b 100644 --- a/lib/compiler/aro/aro/Pragma.zig +++ b/lib/compiler/aro/aro/Pragma.zig @@ -212,8 +212,9 @@ pub const Diagnostic = struct { }; pub fn err(pp: *Preprocessor, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) Compilation.Error!void { - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); 
Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index 6c35ef3906..90c1517386 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -1023,8 +1023,9 @@ fn err(pp: *Preprocessor, loc: anytype, diagnostic: Diagnostic, args: anytype) C defer pp.diagnostics.state.suppress_system_headers = old_suppress_system; if (diagnostic.show_in_system_headers) pp.diagnostics.state.suppress_system_headers = false; - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -1052,8 +1053,9 @@ fn err(pp: *Preprocessor, loc: anytype, diagnostic: Diagnostic, args: anytype) C } fn fatal(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: anytype) Compilation.Error { - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -1074,8 +1076,9 @@ fn fatalNotFound(pp: *Preprocessor, tok: TokenWithExpansionLocs, filename: []con pp.diagnostics.state.fatal_errors = true; defer pp.diagnostics.state.fatal_errors = old; - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - const allocator = sf.get(); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); 
+ const allocator = sf.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/compiler/aro/aro/pragmas/message.zig b/lib/compiler/aro/aro/pragmas/message.zig index 11f5af5a9a..0aa889b2db 100644 --- a/lib/compiler/aro/aro/pragmas/message.zig +++ b/lib/compiler/aro/aro/pragmas/message.zig @@ -44,8 +44,9 @@ fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pra const diagnostic: Pragma.Diagnostic = .pragma_message; - var sf = std.heap.stackFallback(1024, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, .{str}) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/text_literal.zig b/lib/compiler/aro/aro/text_literal.zig index 3792b2cca9..e0aeaefa7b 100644 --- a/lib/compiler/aro/aro/text_literal.zig +++ b/lib/compiler/aro/aro/text_literal.zig @@ -315,8 +315,9 @@ pub const Parser = struct { if (p.errored) return; if (p.comp.diagnostics.effectiveKind(diagnostic) == .off) return; - var sf = std.heap.stackFallback(1024, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.get()); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); defer allocating.deinit(); formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/assembly_backend/x86_64.zig b/lib/compiler/aro/assembly_backend/x86_64.zig index 065d8a1f2d..055557c378 100644 --- a/lib/compiler/aro/assembly_backend/x86_64.zig +++ b/lib/compiler/aro/assembly_backend/x86_64.zig @@ -68,8 +68,9 @@ fn serializeFloat(comptime T: type, value: T, w: *std.Io.Writer) !void { pub fn 
todo(c: *AsmCodeGen, msg: []const u8, tok: Tree.TokenIndex) Error { const loc: Source.Location = c.tree.tokens.items(.loc)[tok]; - var sf = std.heap.stackFallback(1024, c.comp.gpa); - const allocator = sf.get(); + var sf_buf: [1024]u8 = undefined; + var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, c.comp.gpa); + const allocator = sf.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig index 3aff5bd9f1..f1577aad28 100644 --- a/lib/std/fs/path.zig +++ b/lib/std/fs/path.zig @@ -894,8 +894,9 @@ pub fn resolve(allocator: Allocator, paths: []const []const u8) Allocator.Error! pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) Allocator.Error![]u8 { // Avoid heap allocation when paths.len is <= @bitSizeOf(usize) * 2 // (we use `* 3` because stackFallback uses 1 usize as a length) - var bit_set_allocator_state = std.heap.stackFallback(@sizeOf(usize) * 3, allocator); - const bit_set_allocator = bit_set_allocator_state.get(); + var buf: [3]usize = undefined; + var bit_set_allocator_state: std.heap.StackFallbackAllocator = .init(@ptrCast(&buf), allocator); + const bit_set_allocator = bit_set_allocator_state.allocator(); var relevant_paths = try std.bit_set.DynamicBitSetUnmanaged.initEmpty(bit_set_allocator, paths.len); defer relevant_paths.deinit(bit_set_allocator); @@ -1642,7 +1643,8 @@ fn windowsResolveAgainstCwd( parsed: WindowsPath2(u8), ) ![]u8 { // Space for 256 WTF-16 code units; potentially 3 WTF-8 bytes per WTF-16 code unit - var temp_allocator_state = std.heap.stackFallback(256 * 3, gpa); + var buf: [256 * 3]u8 = undefined; + var temp_allocator_state: std.heap.StackFallbackAllocator = .init(&buf, gpa); return switch (parsed.kind) { .drive_absolute, .unc_absolute, @@ -1668,7 +1670,7 @@ fn windowsResolveAgainstCwd( } }, .drive_relative => blk: { - const temp_allocator = temp_allocator_state.get(); + const
drive_cwd = drive_cwd: { const parsed_cwd = parsePathWindows(u8, cwd); diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 2c5270adfe..31ed0da001 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -1776,8 +1776,9 @@ fn structInitExpr( } { - var sfba = std.heap.stackFallback(256, astgen.arena); - const sfba_allocator = sfba.get(); + var sfba_buf: [256]u8 = undefined; + var sfba: std.heap.StackFallbackAllocator = .init(&sfba_buf, astgen.arena); + const sfba_allocator = sfba.allocator(); var duplicate_names: std.array_hash_map.Auto(Zir.NullTerminatedString, ArrayList(Ast.TokenIndex)) = .empty; try duplicate_names.ensureTotalCapacity(sfba_allocator, @intCast(struct_init.ast.fields.len)); @@ -8405,8 +8406,9 @@ fn tunnelThroughClosure( // Otherwise we need a tunnel. First, figure out the path of namespaces we // are tunneling through. This is usually only going to be one or two, so // use an SFBA to optimize for the common case. - var sfba = std.heap.stackFallback(@sizeOf(usize) * 2, astgen.arena); - var intermediate_tunnels = try sfba.get().alloc(*Scope.Namespace, num_tunnels - 1); + var sfba_buf: [2]usize = undefined; + var sfba: std.heap.StackFallbackAllocator = .init(@ptrCast(&sfba_buf), astgen.arena); + var intermediate_tunnels = try sfba.allocator().alloc(*Scope.Namespace, num_tunnels - 1); const root_ns = ns: { var i: usize = num_tunnels - 1; @@ -12927,8 +12929,9 @@ fn scanContainer( }; // The maps below are allocated into this SFBA to avoid using the GPA for small namespaces. 
- var sfba_state = std.heap.stackFallback(512, astgen.gpa); - const sfba = sfba_state.get(); + var sfba_buf: [512]u8 = undefined; + var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, astgen.gpa); + const sfba = sfba_state.allocator(); var names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; var test_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; diff --git a/lib/std/zig/ZonGen.zig b/lib/std/zig/ZonGen.zig index 4d4cfcad9f..09842dcf7e 100644 --- a/lib/std/zig/ZonGen.zig +++ b/lib/std/zig/ZonGen.zig @@ -427,8 +427,9 @@ fn expr(zg: *ZonGen, node: Ast.Node.Index, dest_node: Zoir.Node.Index) Allocator }); // For short initializers, track the names on the stack rather than going through gpa. - var sfba_state = std.heap.stackFallback(256, gpa); - const sfba = sfba_state.get(); + var sfba_buf: [256]u8 = undefined; + var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, gpa); + const sfba = sfba_state.allocator(); var field_names: std.AutoHashMapUnmanaged(Zoir.NullTerminatedString, Ast.TokenIndex) = .empty; defer field_names.deinit(sfba); diff --git a/lib/std/zig/llvm/Builder.zig b/lib/std/zig/llvm/Builder.zig index 276012b09a..accd4d6efc 100644 --- a/lib/std/zig/llvm/Builder.zig +++ b/lib/std/zig/llvm/Builder.zig @@ -7638,9 +7638,9 @@ pub const Constant = enum(u32) { std.math.big.int.calcToStringLimbsBufferLen(expected_limbs, 10) ]std.math.big.Limb, }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), data.builder.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), data.builder.gpa); + const allocator = stack.allocator(); const str = bigint.toStringAlloc(allocator, 10, undefined) catch return error.WriteFailed; defer allocator.free(str); try w.writeAll(str); @@ -9209,9 +9209,9 @@ pub fn getIntrinsic( fields: 
[expected_fields_len]Type, }, }; - var stack align(@max(@alignOf(std.heap.StackFallbackAllocator(0)), @alignOf(ExpectedContents))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); + const allocator = stack.allocator(); const name = name: { { @@ -10607,9 +10607,9 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void std.math.big.int.calcToStringLimbsBufferLen(expected_limbs, 10) ]std.math.big.Limb, }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); + const allocator = stack.allocator(); const limbs = self.metadata_limbs.items[extra.limbs_index..][0..extra.limbs_len]; const bigint: std.math.big.int.Const = .{ @@ -11129,9 +11129,9 @@ fn bigIntConstAssumeCapacity( const bits = type_item.data; const ExpectedContents = [64 / @sizeOf(std.math.big.Limb)]std.math.big.Limb; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); + const allocator = stack.allocator(); var limbs: []std.math.big.Limb = &.{}; defer allocator.free(limbs); diff --git a/src/Air/Legalize.zig b/src/Air/Legalize.zig index 333ec53424..15b6c1237d 100644 --- a/src/Air/Legalize.zig +++ b/src/Air/Legalize.zig @@ -1122,8 +1122,9 @@ fn scalarizeShuffleOneBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro // // So we must first compute `out_idxs` and `in_idxs`. 
- var sfba_state = std.heap.stackFallback(512, gpa); - const sfba = sfba_state.get(); + var sfba_buf: [512]u8 = undefined; + var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, gpa); + const sfba = sfba_state.allocator(); const out_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); defer sfba.free(out_idxs_buf); @@ -1212,8 +1213,9 @@ fn scalarizeShuffleTwoBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro // %8 = br(%1, %7) // }) - var sfba_state = std.heap.stackFallback(512, gpa); - const sfba = sfba_state.get(); + var sfba_buf: [512]u8 = undefined; + var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, gpa); + const sfba = sfba_state.allocator(); const out_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); defer sfba.free(out_idxs_buf); @@ -2394,9 +2396,9 @@ fn packedStoreBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Error!Air.In }).toRef(), .rhs = Air.internedToRef((keep_mask: { const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), zcu.gpa); - const gpa = stack.get(); + var buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&buf), zcu.gpa); + const gpa = stack.allocator(); var mask_big_int: std.math.big.int.Mutable = .{ .limbs = try gpa.alloc( @@ -2489,8 +2491,9 @@ fn packedAggregateInitBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro const agg_ty = orig_ty_pl.ty.toType(); const agg_field_count = agg_ty.structFieldCount(zcu); - var sfba_state = std.heap.stackFallback(@sizeOf([4 * 32 + 2]Air.Inst.Index), gpa); - const sfba = sfba_state.get(); + var sfba_buf: [4 * 32 + 2]Air.Inst.Index = undefined; + var sfba_state: std.heap.StackFallbackAllocator = .init(@ptrCast(&sfba_buf), gpa); + const sfba = sfba_state.allocator(); const inst_buf = try 
sfba.alloc(Air.Inst.Index, 4 * agg_field_count + 2); defer sfba.free(inst_buf); diff --git a/src/Value.zig b/src/Value.zig index cc0e577f84..3946fb9b15 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -882,8 +882,9 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value { else => unreachable, }; // Avoid hitting gpa for accesses to small packed structs - var sfba_state = std.heap.stackFallback(128, zcu.comp.gpa); - const sfba = sfba_state.get(); + var sfba_buf: [128]u8 = undefined; + var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, zcu.comp.gpa); + const sfba = sfba_state.allocator(); const buf = try sfba.alloc(u8, @intCast((ty.bitSize(zcu) + 7) / 8)); defer sfba.free(buf); int_val.writeToPackedMemory(zcu, buf, 0) catch |err| switch (err) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 7641790aae..d7889501ba 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4841,8 +4841,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { { const asm_source = unwrapped_asm.source; - var stack = std.heap.stackFallback(256, f.dg.gpa); - const allocator = stack.get(); + var stack_buf: [256]u8 = undefined; + var stack: std.heap.StackFallbackAllocator = .init(&stack_buf, f.dg.gpa); + const allocator = stack.allocator(); const fixed_asm_source = try allocator.alloc(u8, asm_source.len); defer allocator.free(fixed_asm_source); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ef51f3ba90..88bc40f72f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3605,11 +3605,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = 
.init(@ptrCast(&stack_buf), o.gpa); + const allocator = stack.allocator(); const vals = try allocator.alloc(Builder.Constant, elems.len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, elems.len); @@ -3636,11 +3634,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); + const allocator = stack.allocator(); const vals = try allocator.alloc(Builder.Constant, len_including_sentinel); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, len_including_sentinel); @@ -3668,11 +3664,9 @@ pub const Object = struct { switch (aggregate.storage) { .bytes, .elems => { const ExpectedContents = [Builder.expected_fields_len]Builder.Constant; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); + const allocator = stack.allocator(); const vals = try allocator.alloc(Builder.Constant, vector_type.len); defer allocator.free(vals); @@ -3701,11 +3695,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: 
std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); + const allocator = stack.allocator(); const vals = try allocator.alloc(Builder.Constant, llvm_len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, llvm_len); @@ -3779,11 +3771,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), o.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); + const allocator = stack.allocator(); const vals = try allocator.alloc(Builder.Constant, llvm_len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, llvm_len); diff --git a/src/codegen/llvm/FuncGen.zig b/src/codegen/llvm/FuncGen.zig index 23a0d01a86..50abf43bee 100644 --- a/src/codegen/llvm/FuncGen.zig +++ b/src/codegen/llvm/FuncGen.zig @@ -3530,11 +3530,9 @@ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) const inst_llvm_ty = try o.lowerType(inst_ty); const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); + const allocator = stack.allocator(); const scalar_bits = scalar_ty.intInfo(zcu).bits; var smin_big_int: std.math.big.int.Mutable = .{ @@ -3616,11 +3614,9 @@ fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) Allo } if (scalar_ty.isSignedInt(zcu)) { const ExpectedContents = 
[std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); + const allocator = stack.allocator(); const scalar_bits = scalar_ty.intInfo(zcu).bits; var smin_big_int: std.math.big.int.Mutable = .{ diff --git a/src/codegen/riscv64/CodeGen.zig b/src/codegen/riscv64/CodeGen.zig index cea33b7c66..cd6fcd5153 100644 --- a/src/codegen/riscv64/CodeGen.zig +++ b/src/codegen/riscv64/CodeGen.zig @@ -671,11 +671,12 @@ fn restoreState(func: *Func, state: State, deaths: []const Air.Inst.Index, compt for (deaths) |death| try func.processDeath(death); const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - if (opts.update_tracking) {} else std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa); + const stack_buf_len = if (opts.update_tracking) 0 else 1; + var stack_buf: [stack_buf_len]ExpectedContents = undefined; + var stack = if (opts.update_tracking) {} else std.heap.StackFallbackAllocator.init(@ptrCast(&stack_buf), func.gpa); var reg_locks = if (opts.update_tracking) {} else try std.array_list.Managed(RegisterLock).initCapacity( - stack.get(), + stack.allocator(), @typeInfo(ExpectedContents).array.len, ); defer if (!opts.update_tracking) { @@ -4807,9 +4808,9 @@ fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ExpectedContents = extern struct { vals: [expected_num_args][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - 
std.heap.stackFallback(@sizeOf(ExpectedContents), func.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), func.gpa); + const allocator = stack.allocator(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); diff --git a/src/codegen/x86_64/CodeGen.zig b/src/codegen/x86_64/CodeGen.zig index 452d7e2136..0affe28a8d 100644 --- a/src/codegen/x86_64/CodeGen.zig +++ b/src/codegen/x86_64/CodeGen.zig @@ -173820,9 +173820,9 @@ fn genLazy(cg: *CodeGen, lazy_sym: link.File.LazySymbol) InnerError!void { var err_temp = try cg.tempInit(err_ty, err_mcv); const ExpectedContents = [32]Mir.Inst.Index; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); + const allocator = stack.allocator(); const relocs = try allocator.alloc(Mir.Inst.Index, error_set_type.names.len); defer allocator.free(relocs); @@ -174220,11 +174220,12 @@ fn restoreState(self: *CodeGen, state: State, deaths: []const Air.Inst.Index, co for (deaths) |death| try self.processDeath(death, .{ .emit_instructions = opts.emit_instructions }); const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - if (opts.update_tracking) {} else std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); + const stack_buf_len = if (opts.update_tracking) 0 else 1; + var stack_buf: [stack_buf_len]ExpectedContents = undefined; + var stack = if (opts.update_tracking) {} else std.heap.StackFallbackAllocator.init(@ptrCast(&stack_buf), self.gpa); var reg_locks = if (opts.update_tracking) {} else 
try std.array_list.Managed(RegisterLock).initCapacity( - stack.get(), + stack.allocator(), @typeInfo(ExpectedContents).array.len, ); defer if (!opts.update_tracking) { @@ -175929,9 +175930,9 @@ fn airCall(self: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif tys: [32][@sizeOf(Type)]u8 align(@alignOf(Type)), vals: [32][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var stack_buf: [1]ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); + const allocator = stack.allocator(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); @@ -175985,9 +175986,9 @@ fn genCall(self: *CodeGen, info: union(enum) { frame_indices: [32]FrameIndex, reg_locks: [32][@sizeOf(?RegisterLock)]u8 align(@alignOf(?RegisterLock)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), self.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); + const allocator = stack.allocator(); const var_args = try allocator.alloc(Type, args.len - fn_info.param_types.len); defer allocator.free(var_args); @@ -176588,9 +176589,9 @@ fn lowerSwitchBr( bigint_limbs: [std.math.big.int.calcTwosCompLimbCount(1 << 10)]std.math.big.Limb, relocs: [1 << 6]Mir.Inst.Index, }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); + const 
allocator = stack.allocator(); const state = try cg.saveState(); @@ -181154,9 +181155,9 @@ fn resolveCallingConventionValues( const ExpectedContents = extern struct { param_types: [32][@sizeOf(Type)]u8 align(@alignOf(Type)), }; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); + const allocator = stack.allocator(); const param_types = try allocator.alloc(Type, fn_info.param_types.len + var_args.len); defer allocator.free(param_types); @@ -188706,9 +188707,9 @@ const Select = struct { } const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(1 << 10)]std.math.big.Limb; - var stack align(@max(@alignOf(ExpectedContents), @alignOf(std.heap.StackFallbackAllocator(0)))) = - std.heap.stackFallback(@sizeOf(ExpectedContents), cg.gpa); - const allocator = stack.get(); + var stack_buf: ExpectedContents = undefined; + var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); + const allocator = stack.allocator(); var res_big_int: std.math.big.int.Mutable = .{ .limbs = try allocator.alloc( std.math.big.Limb, diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index 4ec3598c34..209b7bb9da 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -2690,8 +2690,9 @@ pub fn ensureUnusedRelocCapacity(elf: *Elf, loc_si: Symbol.Index, len: usize) !v const shndx = loc_si.shndx(elf); const sh = shndx.get(elf); if (sh.rela_si == .null) { - var stack = std.heap.stackFallback(32, gpa); - const allocator = stack.get(); + var stack_buf: [32]u8 = undefined; + var stack: std.heap.StackFallbackAllocator = .init(&stack_buf, gpa); + const allocator = stack.allocator(); const rela_name = try std.fmt.allocPrint(allocator, ".rela{s}", .{elf.sectionName(sh.si)}); From 
6d40d374d8d2c0eef0c0c6c557461f35b385d085 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Thu, 16 Apr 2026 19:28:14 -0700 Subject: [PATCH 4/5] Merges together the two buffer first allocator implementations --- lib/std/heap.zig | 106 +---------------- lib/std/heap/BufferFirstAllocator.zig | 165 ++++++++++++++++++++++++++ 2 files changed, 167 insertions(+), 104 deletions(-) create mode 100644 lib/std/heap/BufferFirstAllocator.zig diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 1e234ac4a2..8e3d81d26d 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -12,7 +12,7 @@ const Alignment = std.mem.Alignment; pub const ArenaAllocator = @import("heap/ArenaAllocator.zig"); pub const SmpAllocator = @import("heap/SmpAllocator.zig"); pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig"); -pub const StackFirstAllocator = @import("heap/StackFirstAllocator.zig"); +pub const BufferFirstAllocator = @import("heap/BufferFirstAllocator.zig"); pub const PageAllocator = @import("heap/PageAllocator.zig"); pub const WasmAllocator = if (builtin.single_threaded) BrkAllocator else @compileError("unimplemented"); pub const BrkAllocator = @import("heap/BrkAllocator.zig"); @@ -368,89 +368,6 @@ pub const brk_allocator: Allocator = .{ .vtable = &BrkAllocator.vtable, }; -/// An allocator that attempts to allocate from the given buffer, falling back to -/// `fallback_allocator` if this fails. 
-pub const StackFallbackAllocator = struct { - const Self = @This(); - - fallback_allocator: Allocator, - fixed_buffer_allocator: FixedBufferAllocator, - - pub fn init(buf: []u8, fallback_allocator: Allocator) Self { - return .{ - .fallback_allocator = fallback_allocator, - .fixed_buffer_allocator = .init(buf), - }; - } - - pub fn allocator(self: *Self) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .remap = remap, - .free = free, - }, - }; - } - - fn alloc( - ctx: *anyopaque, - len: usize, - alignment: Alignment, - ra: usize, - ) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(ctx)); - return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse - return self.fallback_allocator.rawAlloc(len, alignment, ra); - } - - fn resize( - ctx: *anyopaque, - buf: []u8, - alignment: Alignment, - new_len: usize, - ra: usize, - ) bool { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra); - } else { - return self.fallback_allocator.rawResize(buf, alignment, new_len, ra); - } - } - - fn remap( - context: *anyopaque, - memory: []u8, - alignment: Alignment, - new_len: usize, - return_address: usize, - ) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(context)); - if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) { - return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address); - } else { - return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address); - } - } - - fn free( - ctx: *anyopaque, - buf: []u8, - alignment: Alignment, - ra: usize, - ) void { - const self: *Self = @ptrCast(@alignCast(ctx)); - if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { - return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra); - } else { - return 
self.fallback_allocator.rawFree(buf, alignment, ra); - } - } -}; - test c_allocator { if (builtin.link_libc) { try testAllocator(c_allocator); @@ -501,26 +418,6 @@ test ArenaAllocator { try testAllocatorAlignedShrink(allocator); } -test StackFallbackAllocator { - var buf: [4096]u8 = undefined; - { - var stack_allocator: StackFallbackAllocator = .init(&buf, std.testing.allocator); - try testAllocator(stack_allocator.allocator()); - } - { - var stack_allocator: StackFallbackAllocator = .init(&buf, std.testing.allocator); - try testAllocatorAligned(stack_allocator.allocator()); - } - { - var stack_allocator: StackFallbackAllocator = .init(&buf, std.testing.allocator); - try testAllocatorLargeAlignment(stack_allocator.allocator()); - } - { - var stack_allocator: StackFallbackAllocator = .init(&buf, std.testing.allocator); - try testAllocatorAlignedShrink(stack_allocator.allocator()); - } -} - /// This one should not try alignments that exceed what C malloc can handle. pub fn testAllocator(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); @@ -989,6 +886,7 @@ test { _ = ArenaAllocator; _ = DebugAllocator(.{}); _ = FixedBufferAllocator; + _ = BufferFirstAllocator; if (builtin.single_threaded) { if (builtin.cpu.arch.isWasm() or (builtin.os.tag == .linux and !builtin.link_libc)) { _ = brk_allocator; diff --git a/lib/std/heap/BufferFirstAllocator.zig b/lib/std/heap/BufferFirstAllocator.zig new file mode 100644 index 0000000000..f0b5e8880b --- /dev/null +++ b/lib/std/heap/BufferFirstAllocator.zig @@ -0,0 +1,165 @@ +//! An allocator that attempts to allocate from the given buffer, falling back to +//! `fallback_allocator` if this fails. 
+ +const std = @import("../std.zig"); +const heap = std.heap; +const testing = std.testing; + +const Alignment = std.mem.Alignment; +const Allocator = std.mem.Allocator; +const FixedBufferAllocator = std.heap.FixedBufferAllocator; + +const BufferFirstAllocator = @This(); + +fallback_allocator: Allocator, +fixed_buffer_allocator: FixedBufferAllocator, + +pub fn init(buffer: []u8, fallback_allocator: Allocator) BufferFirstAllocator { + return .{ + .fallback_allocator = fallback_allocator, + .fixed_buffer_allocator = .init(buffer), + }; +} + +pub fn allocator(self: *BufferFirstAllocator) Allocator { + return .{ + .ptr = self, + .vtable = &.{ + .alloc = alloc, + .resize = resize, + .remap = remap, + .free = free, + }, + }; +} + +fn alloc( + ctx: *anyopaque, + len: usize, + alignment: Alignment, + ra: usize, +) ?[*]u8 { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(ctx)); + return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, alignment, ra) orelse + return self.fallback_allocator.rawAlloc(len, alignment, ra); +} + +fn resize( + ctx: *anyopaque, + buf: []u8, + alignment: Alignment, + new_len: usize, + ra: usize, +) bool { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra); + } else { + return self.fallback_allocator.rawResize(buf, alignment, new_len, ra); + } +} + +fn remap( + context: *anyopaque, + memory: []u8, + alignment: Alignment, + new_len: usize, + return_address: usize, +) ?[*]u8 { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(context)); + if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) { + return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address); + } else { + return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address); + } +} + +fn free( + ctx: *anyopaque, + buf: []u8, + alignment: 
Alignment, + ra: usize, +) void { + const self: *BufferFirstAllocator = @ptrCast(@alignCast(ctx)); + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra); + } else { + return self.fallback_allocator.rawFree(buf, alignment, ra); + } +} + +test "BufferFirstAllocator" { + // Buffer first specific tests + { + var buffer: [10]u8 = undefined; + var bfa_state: BufferFirstAllocator = .init(&buffer, std.testing.allocator); + const bfa = bfa_state.allocator(); + + // We're under the limit, so we should be allocated in the buffer + const txt0 = "hellowrld"; + const buf0 = try bfa.create(@TypeOf(txt0.*)); + buf0.* = txt0.*; + try testing.expect(bfa_state.fixed_buffer_allocator.ownsPtr(buf0.ptr)); + + // We're now over the limit, so we should be allocated from the fallback + const txt1 = "test!"; + const buf1 = try bfa.create(@TypeOf(txt1.*)); + buf1.* = txt1.*; + try testing.expect(!bfa_state.fixed_buffer_allocator.ownsPtr(buf1.ptr)); + + // Free the allocation that took up space in the buffer + try testing.expectEqualStrings(txt0, buf0); + bfa.destroy(buf0); + + // The next allocation would go in the buffer, but it's too big so it doesn't + const txt2 = "qwertyqwerty"; + const buf2 = try bfa.create(@TypeOf(txt2.*)); + buf2.* = txt2.*; + try testing.expect(!bfa_state.fixed_buffer_allocator.ownsPtr(buf2.ptr)); + + // The next allocation is smaller and fits in the buffer + const txt3 = "dvorak"; + const buf3 = try bfa.create(@TypeOf(txt3.*)); + buf3.* = txt3.*; + try testing.expect(bfa_state.fixed_buffer_allocator.ownsPtr(buf3.ptr)); + + // The remainder in the buffer is too small for the following allocation so it falls back + const txt4 = "moretext"; + const buf4 = try bfa.create(@TypeOf(txt4.*)); + buf4.* = txt4.*; + try testing.expect(!bfa_state.fixed_buffer_allocator.ownsPtr(buf4.ptr)); + + // Check equality on the remaining buffers and free them + try testing.expectEqualStrings(txt1, buf1); 
+ bfa.destroy(buf1); + try testing.expectEqualStrings(txt2, buf2); + bfa.destroy(buf2); + try testing.expectEqualStrings(txt3, buf3); + bfa.destroy(buf3); + try testing.expectEqualStrings(txt4, buf4); + bfa.destroy(buf4); + + try testing.expectEqual(0, bfa_state.fixed_buffer_allocator.end_index); + } + + // Standard allocator tests + { + var buf: [4096]u8 = undefined; + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocator(bfa.allocator()); + } + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocatorAligned(bfa.allocator()); + } + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocatorLargeAlignment(bfa.allocator()); + } + { + var bfa: BufferFirstAllocator = .init(&buf, std.testing.allocator); + try heap.testAllocatorAlignedShrink(bfa.allocator()); + } + } +} From e2c3920fb178a7e785036238a7c8207539b08902 Mon Sep 17 00:00:00 2001 From: Mason Remaley Date: Thu, 16 Apr 2026 19:28:48 -0700 Subject: [PATCH 5/5] Renames buffer first allocator in compiler and std --- lib/compiler/aro/aro/CodeGen.zig | 6 +- lib/compiler/aro/aro/Compilation.zig | 30 ++--- lib/compiler/aro/aro/Driver.zig | 18 +-- lib/compiler/aro/aro/Parser.zig | 36 +++--- lib/compiler/aro/aro/Pragma.zig | 6 +- lib/compiler/aro/aro/Preprocessor.zig | 18 +-- lib/compiler/aro/aro/pragmas/message.zig | 6 +- lib/compiler/aro/aro/text_literal.zig | 6 +- lib/compiler/aro/assembly_backend/x86_64.zig | 6 +- lib/std/debug.zig | 5 +- lib/std/fs/path.zig | 4 +- lib/std/heap/StackFirstAllocator.zig | 123 ------------------- lib/std/zig/AstGen.zig | 42 +++---- lib/std/zig/ZonGen.zig | 10 +- lib/std/zig/llvm/Builder.zig | 24 ++-- src/Air/Legalize.zig | 48 ++++---- src/Value.zig | 12 +- src/codegen/c.zig | 6 +- src/codegen/llvm.zig | 30 ++--- src/codegen/llvm/FuncGen.zig | 12 +- src/codegen/riscv64/CodeGen.zig | 12 +- src/codegen/x86_64/CodeGen.zig | 42 +++---- src/link/Elf2.zig | 6 +- 23 files 
changed, 193 insertions(+), 315 deletions(-) delete mode 100644 lib/std/heap/StackFirstAllocator.zig diff --git a/lib/compiler/aro/aro/CodeGen.zig b/lib/compiler/aro/aro/CodeGen.zig index ecccab3c1e..5a65b8b6c4 100644 --- a/lib/compiler/aro/aro/CodeGen.zig +++ b/lib/compiler/aro/aro/CodeGen.zig @@ -54,9 +54,9 @@ return_label: Ir.Ref = undefined, compound_assign_dummy: ?Ir.Ref = null, fn fail(c: *CodeGen, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, c.comp.gpa); - const allocator = sf.allocator(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, c.comp.gpa); + const allocator = bfa.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index 2b6a0f39a8..d6ed6c00df 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -1761,9 +1761,9 @@ fn addToSearchPath(comp: *Compilation, include: Include, verbose: bool) !void { try comp.search_path.append(comp.gpa, include); } fn removeDuplicateSearchPaths(comp: *Compilation, start: usize, verbose: bool) !void { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, comp.gpa); - const allocator = sf.allocator(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, comp.gpa); + const allocator = bfa.allocator(); var seen_includes: std.StringHashMapUnmanaged(void) = .empty; defer seen_includes.deinit(allocator); var seen_frameworks: std.StringHashMapUnmanaged(void) = .empty; @@ -1977,11 +1977,11 @@ const FindInclude = struct { ) Allocator.Error!?Result { const comp = find.comp; - var stack_fallback_buf: [path_buf_stack_limit]u8 = undefined; - var stack_fallback: std.heap.StackFallbackAllocator = .init(&stack_fallback_buf, 
comp.gpa); - const sfa = stack_fallback.allocator(); - const header_path = try std.fmt.allocPrint(sfa, format, args); - defer sfa.free(header_path); + var bfa_buf: [path_buf_stack_limit]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, comp.gpa); + const bfa = bfa_state.allocator(); + const header_path = try std.fmt.allocPrint(bfa, format, args); + defer bfa.free(header_path); find.comp.normalizePath(header_path); const source = comp.addSourceFromPathExtra(header_path, kind) catch |err| switch (err) { @@ -2070,15 +2070,15 @@ pub fn findEmbed( } } - var stack_fallback_buf: [path_buf_stack_limit]u8 = undefined; - var stack_fallback: std.heap.StackFallbackAllocator = .init(&stack_fallback_buf, comp.gpa); - const sf_allocator = stack_fallback.allocator(); + var bfa_buf: [path_buf_stack_limit]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, comp.gpa); + const bfa = bfa_state.allocator(); switch (include_type) { .quotes, .cli => { const dir = std.fs.path.dirname(comp.getSource(includer_token_source).path) orelse "."; - const path = try std.fs.path.join(sf_allocator, &.{ dir, filename }); - defer sf_allocator.free(path); + const path = try std.fs.path.join(bfa, &.{ dir, filename }); + defer bfa.free(path); comp.normalizePath(path); if (comp.getPathContents(path, limit)) |some| { errdefer comp.gpa.free(some); @@ -2092,8 +2092,8 @@ pub fn findEmbed( .angle_brackets => {}, } for (comp.embed_dirs.items) |embed_dir| { - const path = try std.fs.path.join(sf_allocator, &.{ embed_dir, filename }); - defer sf_allocator.free(path); + const path = try std.fs.path.join(bfa, &.{ embed_dir, filename }); + defer bfa.free(path); comp.normalizePath(path); if (comp.getPathContents(path, limit)) |some| { errdefer comp.gpa.free(some); diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index a72837b7fc..fc35e13d96 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ 
-947,9 +947,9 @@ fn addImacros(d: *Driver, path: []const u8) !void { } pub fn err(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -957,9 +957,9 @@ pub fn err(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { } pub fn warn(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -1103,9 +1103,9 @@ fn parseTarget(d: *Driver, arch_os_abi: []const u8, opt_cpu_features: ?[]const u } pub fn fatal(d: *Driver, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, d.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, d.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/Parser.zig 
b/lib/compiler/aro/aro/Parser.zig index 879140fff0..fad33316d2 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -215,9 +215,9 @@ fn checkIdentifierCodepointWarnings(p: *Parser, codepoint: u21, loc: Source.Loca assert(codepoint >= 0x80); const prev_total = p.diagnostics.total; - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); if (!char_info.isC99IdChar(codepoint)) { @@ -430,9 +430,9 @@ pub fn err(p: *Parser, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) if (diagnostic.suppress_unless_version) |some| if (!p.comp.langopts.standard.atLeast(some)) return; if (p.diagnostics.effectiveKind(diagnostic) == .off) return; - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); p.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -1539,9 +1539,9 @@ fn staticAssert(p: *Parser) Error!bool { } } else { if (!res.val.toBool(p.comp)) { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); if (p.staticAssertMessage(res_node, str, &allocating) catch 
return error.OutOfMemory) |message| { @@ -4840,9 +4840,9 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, asm_tok: TokenIndex const expected_items = 8; // arbitrarily chosen, most assembly will have fewer than 8 inputs/outputs/constraints/names const bytes_needed = expected_items * @sizeOf(Tree.Node.AsmStmt.Operand) + expected_items * 2 * @sizeOf(Node.Index); - var stack_fallback_buf: [bytes_needed]u8 = undefined; - var stack_fallback: std.heap.StackFallbackAllocator = .init(&stack_fallback_buf, gpa); - const allocator = stack_fallback.allocator(); + var bfa_buf: [bytes_needed]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const allocator = bfa.allocator(); var operands: std.ArrayList(Tree.Node.AsmStmt.Operand) = .empty; defer operands.deinit(allocator); @@ -9926,9 +9926,9 @@ fn primaryExpr(p: *Parser) Error!?Result { if (p.func.pretty_ident) |some| { qt = some.qt; } else if (p.func.qt) |func_qt| { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); func_qt.printNamed(p.tokSlice(p.func.name), p.comp, &allocating.writer) catch return error.OutOfMemory; @@ -10217,9 +10217,9 @@ fn charLiteral(p: *Parser) Error!?Result { }; const max_chars_expected = 4; - var sf_buf: [max_chars_expected]u32 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(@ptrCast(&sf_buf), gpa); - const allocator = sf.allocator(); + var bfa_buf: [max_chars_expected]u32 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), gpa); + const allocator = bfa.allocator(); var chars: std.ArrayList(u32) = .empty; defer chars.deinit(allocator); diff --git a/lib/compiler/aro/aro/Pragma.zig 
b/lib/compiler/aro/aro/Pragma.zig index dc8511482b..51a1df99b2 100644 --- a/lib/compiler/aro/aro/Pragma.zig +++ b/lib/compiler/aro/aro/Pragma.zig @@ -212,9 +212,9 @@ pub const Diagnostic = struct { }; pub fn err(pp: *Preprocessor, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) Compilation.Error!void { - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index 90c1517386..15ac3b0a1b 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -1023,9 +1023,9 @@ fn err(pp: *Preprocessor, loc: anytype, diagnostic: Diagnostic, args: anytype) C defer pp.diagnostics.state.suppress_system_headers = old_suppress_system; if (diagnostic.show_in_system_headers) pp.diagnostics.state.suppress_system_headers = false; - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; @@ -1053,9 +1053,9 @@ fn err(pp: *Preprocessor, loc: anytype, diagnostic: Diagnostic, args: anytype) C } fn fatal(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: anytype) Compilation.Error { - var sf_buf: [1024]u8 = undefined; - 
var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, fmt, args) catch return error.OutOfMemory; @@ -1076,9 +1076,9 @@ fn fatalNotFound(pp: *Preprocessor, tok: TokenWithExpansionLocs, filename: []con pp.diagnostics.state.fatal_errors = true; defer pp.diagnostics.state.fatal_errors = old; - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); - const allocator = sf.allocator(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + const allocator = bfa.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/compiler/aro/aro/pragmas/message.zig b/lib/compiler/aro/aro/pragmas/message.zig index 0aa889b2db..fa9e19f36e 100644 --- a/lib/compiler/aro/aro/pragmas/message.zig +++ b/lib/compiler/aro/aro/pragmas/message.zig @@ -44,9 +44,9 @@ fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pra const diagnostic: Pragma.Diagnostic = .pragma_message; - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, pp.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, pp.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, .{str}) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/aro/text_literal.zig b/lib/compiler/aro/aro/text_literal.zig index e0aeaefa7b..73efc5d262 100644 --- 
a/lib/compiler/aro/aro/text_literal.zig +++ b/lib/compiler/aro/aro/text_literal.zig @@ -315,9 +315,9 @@ pub const Parser = struct { if (p.errored) return; if (p.comp.diagnostics.effectiveKind(diagnostic) == .off) return; - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, p.comp.gpa); - var allocating: std.Io.Writer.Allocating = .init(sf.allocator()); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, p.comp.gpa); + var allocating: std.Io.Writer.Allocating = .init(bfa.allocator()); defer allocating.deinit(); formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory; diff --git a/lib/compiler/aro/assembly_backend/x86_64.zig b/lib/compiler/aro/assembly_backend/x86_64.zig index 055557c378..056db55dbe 100644 --- a/lib/compiler/aro/assembly_backend/x86_64.zig +++ b/lib/compiler/aro/assembly_backend/x86_64.zig @@ -68,9 +68,9 @@ fn serializeFloat(comptime T: type, value: T, w: *std.Io.Writer) !void { pub fn todo(c: *AsmCodeGen, msg: []const u8, tok: Tree.TokenIndex) Error { const loc: Source.Location = c.tree.tokens.items(.loc)[tok]; - var sf_buf: [1024]u8 = undefined; - var sf: std.heap.StackFallbackAllocator = .init(&sf_buf, c.comp.gpa); - const allocator = sf.allocator(); + var bfa_buf: [1024]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, c.comp.gpa); + const allocator = bfa.allocator(); var buf: std.ArrayList(u8) = .empty; defer buf.deinit(allocator); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 6eb0b3d673..fe664e1ba3 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1197,8 +1197,9 @@ fn printSourceAtAddress( // Initialize the symbol array with space for at least one element, allocating this on the stack // in the common case where only one element is needed - var symbol_fallback_allocator = std.heap.stackFallback(@sizeOf(Symbol) + @alignOf(Symbol) - 1, getDebugInfoAllocator()); - const symbol_allocator = 
symbol_fallback_allocator.get(); + var buf: [1]Symbol = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&buf), getDebugInfoAllocator()); + const symbol_allocator = bfa.allocator(); var symbols = std.ArrayList(Symbol).initCapacity(symbol_allocator, 1) catch unreachable; defer symbols.deinit(symbol_allocator); diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig index f1577aad28..40a8e9c3c4 100644 --- a/lib/std/fs/path.zig +++ b/lib/std/fs/path.zig @@ -895,7 +895,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) Allocator // Avoid heap allocation when paths.len is <= @bitSizeOf(usize) * 2 // (we use `* 3` because stackFallback uses 1 usize as a length) var buf: [3]usize = undefined; - var bit_set_allocator_state: std.heap.StackFallbackAllocator = .init(@ptrCast(&buf), allocator); + var bit_set_allocator_state: std.heap.BufferFirstAllocator = .init(@ptrCast(&buf), allocator); const bit_set_allocator = bit_set_allocator_state.allocator(); var relevant_paths = try std.bit_set.DynamicBitSetUnmanaged.initEmpty(bit_set_allocator, paths.len); defer relevant_paths.deinit(bit_set_allocator); @@ -1644,7 +1644,7 @@ fn windowsResolveAgainstCwd( ) ![]u8 { // Space for 256 WTF-16 code units; potentially 3 WTF-8 bytes per WTF-16 code unit var buf: [256 * 3]u8 = undefined; - var temp_allocator_state: std.heap.StackFallbackAllocator = .init(&buf, gpa); + var temp_allocator_state: std.heap.BufferFirstAllocator = .init(&buf, gpa); return switch (parsed.kind) { .drive_absolute, .unc_absolute, diff --git a/lib/std/heap/StackFirstAllocator.zig b/lib/std/heap/StackFirstAllocator.zig deleted file mode 100644 index 9b3e7a2d08..0000000000 --- a/lib/std/heap/StackFirstAllocator.zig +++ /dev/null @@ -1,123 +0,0 @@ -//! A "composite" allocator that attempts to allocate first on the stack, using -//! the provided FixedBufferAllocator; upon failure, the provided secondary -//! allocator is used. 
reset() is NOT provided, even though available for the -//! (primary) FixedBufferAllocator, because it may not be available for the -//! provided secondary allocator (so callers must call reset() on underlying -//! allocators, themselves, when desirable). - -const std = @import("../std.zig"); -const mem = std.mem; -const Allocator = mem.Allocator; -const FixedBufferAllocator = std.heap.FixedBufferAllocator; -const assert = std.debug.assert; - -const StackFirstAllocator = @This(); - -primary: FixedBufferAllocator, -secondary: Allocator, - -pub fn init(buffer: []u8, secondary_allocator: Allocator) StackFirstAllocator { - return .{ - .primary = .init(buffer), - .secondary = secondary_allocator, - }; -} - -pub fn allocator(self: *StackFirstAllocator) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .remap = remap, - .free = free, - }, - }; -} - -pub fn alloc(ctx: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 { - const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); - return FixedBufferAllocator.alloc(&self.primary, len, alignment, ret_addr) orelse - self.secondary.rawAlloc(len, alignment, ret_addr); -} - -pub fn resize(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) bool { - const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); - return if (self.primary.ownsPtr(memory.ptr)) - FixedBufferAllocator.resize(&self.primary, memory, alignment, new_len, ret_addr) - else - self.secondary.rawResize(memory, alignment, new_len, ret_addr); -} - -pub fn remap(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 { - const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); - return if (self.primary.ownsPtr(memory.ptr)) - FixedBufferAllocator.remap(&self.primary, memory, alignment, new_len, ret_addr) - else - self.secondary.rawRemap(memory, alignment, new_len, ret_addr); -} - -pub fn free(ctx: *anyopaque, 
memory: []u8, alignment: mem.Alignment, ret_addr: usize) void { - const self: *StackFirstAllocator = @ptrCast(@alignCast(ctx)); - return if (self.primary.ownsPtr(memory.ptr)) - FixedBufferAllocator.free(&self.primary, memory, alignment, ret_addr) - else - self.secondary.rawFree(memory, alignment, ret_addr); -} - -test StackFirstAllocator { - var arena = std.heap.ArenaAllocator.init(std.testing.allocator); - defer arena.deinit(); - var buffer: [10]u8 = undefined; - var sfa = StackFirstAllocator.init(&buffer, arena.allocator()); - - const expect = std.testing.expect; - const expectEqualStrings = std.testing.expectEqualStrings; - - const al = sfa.allocator(); - const txt = "0123456789"; - - const dest = try al.alloc(u8, txt.len); - @memcpy(dest, txt); - try expectEqualStrings(txt, dest); - try expect(sfa.primary.ownsPtr(dest.ptr)); - - const txt2 = "abcde"; - const dest2 = try al.alloc(u8, txt2.len); - @memcpy(dest2, txt2); - try expectEqualStrings(txt2, dest2); - try expect(!sfa.primary.ownsPtr(dest2.ptr)); - - sfa.primary.reset(); - - const txt3 = "0123"; - const dest3 = try al.alloc(u8, txt3.len); - @memcpy(dest3, txt3); - try expectEqualStrings(txt3, dest3); - try expect(sfa.primary.ownsPtr(dest3.ptr)); - - sfa.primary.reset(); - //arena.reset(); // unnecessary, but allowed (note `defer arena.deinit()` above) - - // stock tests: - { - var buf: [16]u8 = undefined; - var a = StackFirstAllocator.init(&buf, std.testing.allocator); - try std.heap.testAllocator(a.allocator()); - } - { - var buf: [16]u8 = undefined; - var a = StackFirstAllocator.init(&buf, std.testing.allocator); - try std.heap.testAllocatorAligned(a.allocator()); - } - { - var buf: [16]u8 = undefined; - var a = StackFirstAllocator.init(&buf, std.testing.allocator); - try std.heap.testAllocatorLargeAlignment(a.allocator()); - } - { - var buf: [16]u8 = undefined; - var a = StackFirstAllocator.init(&buf, std.testing.allocator); - try std.heap.testAllocatorAlignedShrink(a.allocator()); - } -} diff --git 
a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 31ed0da001..c737aedda7 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -1776,12 +1776,12 @@ fn structInitExpr( } { - var sfba_buf: [256]u8 = undefined; - var sfba: std.heap.StackFallbackAllocator = .init(&sfba_buf, astgen.arena); - const sfba_allocator = sfba.allocator(); + var bfa_buf: [256]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, astgen.arena); + const bfa = bfa_state.allocator(); var duplicate_names: std.array_hash_map.Auto(Zir.NullTerminatedString, ArrayList(Ast.TokenIndex)) = .empty; - try duplicate_names.ensureTotalCapacity(sfba_allocator, @intCast(struct_init.ast.fields.len)); + try duplicate_names.ensureTotalCapacity(bfa, @intCast(struct_init.ast.fields.len)); // When there aren't errors, use this to avoid a second iteration. var any_duplicate = false; @@ -1790,14 +1790,14 @@ fn structInitExpr( const name_token = tree.firstToken(field) - 2; const name_index = try astgen.identAsString(name_token); - const gop = try duplicate_names.getOrPut(sfba_allocator, name_index); + const gop = try duplicate_names.getOrPut(bfa, name_index); if (gop.found_existing) { - try gop.value_ptr.append(sfba_allocator, name_token); + try gop.value_ptr.append(bfa, name_token); any_duplicate = true; } else { gop.value_ptr.* = .empty; - try gop.value_ptr.append(sfba_allocator, name_token); + try gop.value_ptr.append(bfa, name_token); } } @@ -8405,10 +8405,10 @@ fn tunnelThroughClosure( // Otherwise we need a tunnel. First, figure out the path of namespaces we // are tunneling through. This is usually only going to be one or two, so - // use an SFBA to optimize for the common case. - var sfba_buf: [2]usize = undefined; - var sfba: std.heap.StackFallbackAllocator = .init(@ptrCast(&sfba_buf), astgen.arena); - var intermediate_tunnels = try sfba.allocator().alloc(*Scope.Namespace, num_tunnels - 1); + // use a BFA to optimize for the common case. 
+ var bfa_buf: [2]usize = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), astgen.arena); + var intermediate_tunnels = try bfa.allocator().alloc(*Scope.Namespace, num_tunnels - 1); const root_ns = ns: { var i: usize = num_tunnels - 1; @@ -12928,18 +12928,18 @@ fn scanContainer( next: ?*@This(), }; - // The maps below are allocated into this SFBA to avoid using the GPA for small namespaces. - var sfba_buf: [512]u8 = undefined; - var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, astgen.gpa); - const sfba = sfba_state.allocator(); + // The maps below are allocated into this BFA to avoid using the GPA for small namespaces. + var bfa_buf: [512]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, astgen.gpa); + const bfa = bfa_state.allocator(); var names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; var test_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; var decltest_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; defer { - names.deinit(sfba); - test_names.deinit(sfba); - decltest_names.deinit(sfba); + names.deinit(bfa); + test_names.deinit(bfa); + decltest_names.deinit(bfa); } var any_duplicates = false; @@ -13011,7 +13011,7 @@ fn scanContainer( else => {}, // unnamed test .string_literal => { const name = try astgen.strLitAsString(test_name_token); - const gop = try test_names.getOrPut(sfba, name.index); + const gop = try test_names.getOrPut(bfa, name.index); if (gop.found_existing) { var e = gop.value_ptr; while (e.next) |n| e = n; @@ -13024,7 +13024,7 @@ fn scanContainer( }, .identifier => { const name = try astgen.identAsString(test_name_token); - const gop = try decltest_names.getOrPut(sfba, name); + const gop = try decltest_names.getOrPut(bfa, name); if (gop.found_existing) { var e = gop.value_ptr; while (e.next) |n| e = n; @@ -13051,7 +13051,7 @@ fn scanContainer( } { - const gop = 
try names.getOrPut(sfba, name_str_index); + const gop = try names.getOrPut(bfa, name_str_index); const new_ent: NameEntry = .{ .tok = name_token, .next = null, diff --git a/lib/std/zig/ZonGen.zig b/lib/std/zig/ZonGen.zig index 09842dcf7e..5326f71bea 100644 --- a/lib/std/zig/ZonGen.zig +++ b/lib/std/zig/ZonGen.zig @@ -427,11 +427,11 @@ fn expr(zg: *ZonGen, node: Ast.Node.Index, dest_node: Zoir.Node.Index) Allocator }); // For short initializers, track the names on the stack rather than going through gpa. - var sfba_buf: [256]u8 = undefined; - var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, gpa); - const sfba = sfba_state.allocator(); + var bfa_buf: [256]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const bfa = bfa_state.allocator(); var field_names: std.AutoHashMapUnmanaged(Zoir.NullTerminatedString, Ast.TokenIndex) = .empty; - defer field_names.deinit(sfba); + defer field_names.deinit(bfa); var reported_any_duplicate = false; @@ -439,7 +439,7 @@ fn expr(zg: *ZonGen, node: Ast.Node.Index, dest_node: Zoir.Node.Index) Allocator const name_token = tree.firstToken(elem_node) - 2; if (zg.identAsString(name_token)) |name_str| { zg.extra.items[extra_name_idx] = @intFromEnum(name_str); - const gop = try field_names.getOrPut(sfba, name_str); + const gop = try field_names.getOrPut(bfa, name_str); if (gop.found_existing and !reported_any_duplicate) { reported_any_duplicate = true; const earlier_token = gop.value_ptr.*; diff --git a/lib/std/zig/llvm/Builder.zig b/lib/std/zig/llvm/Builder.zig index accd4d6efc..d4316d45a0 100644 --- a/lib/std/zig/llvm/Builder.zig +++ b/lib/std/zig/llvm/Builder.zig @@ -7638,9 +7638,9 @@ pub const Constant = enum(u32) { std.math.big.int.calcToStringLimbsBufferLen(expected_limbs, 10) ]std.math.big.Limb, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), data.builder.gpa); - const allocator = stack.allocator(); + var 
bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), data.builder.gpa); + const allocator = bfa.allocator(); const str = bigint.toStringAlloc(allocator, 10, undefined) catch return error.WriteFailed; defer allocator.free(str); try w.writeAll(str); @@ -9209,9 +9209,9 @@ pub fn getIntrinsic( fields: [expected_fields_len]Type, }, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const name = name: { { @@ -10607,9 +10607,9 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void std.math.big.int.calcToStringLimbsBufferLen(expected_limbs, 10) ]std.math.big.Limb, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const limbs = self.metadata_limbs.items[extra.limbs_index..][0..extra.limbs_len]; const bigint: std.math.big.int.Const = .{ @@ -11129,9 +11129,9 @@ fn bigIntConstAssumeCapacity( const bits = type_item.data; const ExpectedContents = [64 / @sizeOf(std.math.big.Limb)]std.math.big.Limb; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); var limbs: []std.math.big.Limb = &.{}; defer allocator.free(limbs); diff --git a/src/Air/Legalize.zig 
b/src/Air/Legalize.zig index 15b6c1237d..1eeed71bd3 100644 --- a/src/Air/Legalize.zig +++ b/src/Air/Legalize.zig @@ -1122,15 +1122,15 @@ fn scalarizeShuffleOneBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro // // So we must first compute `out_idxs` and `in_idxs`. - var sfba_buf: [512]u8 = undefined; - var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, gpa); - const sfba = sfba_state.allocator(); + var bfa_buf: [512]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const bfa = bfa_state.allocator(); - const out_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(out_idxs_buf); + const out_idxs_buf = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(out_idxs_buf); - const in_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(in_idxs_buf); + const in_idxs_buf = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(in_idxs_buf); var n: usize = 0; for (shuffle.mask, 0..) 
|mask, out_idx| switch (mask.unwrap()) { @@ -1144,8 +1144,8 @@ fn scalarizeShuffleOneBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro const init_val: Value = init: { const undef_val = try pt.undefValue(shuffle.result_ty.childType(zcu)); - const elems = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(elems); + const elems = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(elems); for (shuffle.mask, elems) |mask, *elem| elem.* = switch (mask.unwrap()) { .value => |ip_index| ip_index, .elem => undef_val.toIntern(), @@ -1213,15 +1213,15 @@ fn scalarizeShuffleTwoBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro // %8 = br(%1, %7) // }) - var sfba_buf: [512]u8 = undefined; - var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, gpa); - const sfba = sfba_state.allocator(); + var bfa_buf: [512]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const bfa = bfa_state.allocator(); - const out_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(out_idxs_buf); + const out_idxs_buf = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(out_idxs_buf); - const in_idxs_buf = try sfba.alloc(InternPool.Index, shuffle.mask.len); - defer sfba.free(in_idxs_buf); + const in_idxs_buf = try bfa.alloc(InternPool.Index, shuffle.mask.len); + defer bfa.free(in_idxs_buf); // Iterate `shuffle.mask` before doing anything, because modifying AIR invalidates it. 
const out_idxs_a, const in_idxs_a, const out_idxs_b, const in_idxs_b = idxs: { @@ -2396,9 +2396,9 @@ fn packedStoreBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Error!Air.In }).toRef(), .rhs = Air.internedToRef((keep_mask: { const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&buf), zcu.gpa); - const gpa = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), zcu.gpa); + const gpa = bfa.allocator(); var mask_big_int: std.math.big.int.Mutable = .{ .limbs = try gpa.alloc( @@ -2491,12 +2491,12 @@ fn packedAggregateInitBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index) Erro const agg_ty = orig_ty_pl.ty.toType(); const agg_field_count = agg_ty.structFieldCount(zcu); - var sfba_buf: [4 * 32 + 2]Air.Inst.Index = undefined; - var sfba_state: std.heap.StackFallbackAllocator = .init(@ptrCast(&sfba_buf), gpa); - const sfba = sfba_state.allocator(); + var bfa_buf: [4 * 32 + 2]Air.Inst.Index = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), gpa); + const bfa = bfa_state.allocator(); - const inst_buf = try sfba.alloc(Air.Inst.Index, 4 * agg_field_count + 2); - defer sfba.free(inst_buf); + const inst_buf = try bfa.alloc(Air.Inst.Index, 4 * agg_field_count + 2); + defer bfa.free(inst_buf); var main_block: Block = .init(inst_buf); try l.air_instructions.ensureUnusedCapacity(gpa, inst_buf.len); diff --git a/src/Value.zig b/src/Value.zig index 3946fb9b15..2f56c6301e 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -882,16 +882,16 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value { else => unreachable, }; // Avoid hitting gpa for accesses to small packed structs - var sfba_buf: [128]u8 = undefined; - var sfba_state: std.heap.StackFallbackAllocator = .init(&sfba_buf, zcu.comp.gpa); - const sfba = 
sfba_state.allocator(); - const buf = try sfba.alloc(u8, @intCast((ty.bitSize(zcu) + 7) / 8)); - defer sfba.free(buf); + var bfa_buf: [128]u8 = undefined; + var bfa_state: std.heap.BufferFirstAllocator = .init(&bfa_buf, zcu.comp.gpa); + const bfa = bfa_state.allocator(); + const buf = try bfa.alloc(u8, @intCast((ty.bitSize(zcu) + 7) / 8)); + defer bfa.free(buf); int_val.writeToPackedMemory(zcu, buf, 0) catch |err| switch (err) { error.ReinterpretDeclRef => unreachable, // it's an integer error.OutOfMemory => |e| return e, }; - return Value.readFromPackedMemory(field_ty, pt, buf, field_bit_offset, sfba) catch |err| switch (err) { + return Value.readFromPackedMemory(field_ty, pt, buf, field_bit_offset, bfa) catch |err| switch (err) { error.IllDefinedMemoryLayout => unreachable, // it's a bitpack error.OutOfMemory => |e| return e, }; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d7889501ba..519a4f0bb7 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4841,9 +4841,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { { const asm_source = unwrapped_asm.source; - var stack_buf: [256]u8 = undefined; - var stack: std.heap.StackFallbackAllocator = .init(&stack_buf, f.dg.gpa); - const allocator = stack.allocator(); + var bfa_buf: [256]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, f.dg.gpa); + const allocator = bfa.allocator(); const fixed_asm_source = try allocator.alloc(u8, asm_source.len); defer allocator.free(fixed_asm_source); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 88bc40f72f..31cc88b6a2 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3605,9 +3605,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); - const allocator = stack.allocator(); + var bfa_buf: 
ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, elems.len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, elems.len); @@ -3634,9 +3634,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, len_including_sentinel); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, len_including_sentinel); @@ -3664,9 +3664,9 @@ pub const Object = struct { switch (aggregate.storage) { .bytes, .elems => { const ExpectedContents = [Builder.expected_fields_len]Builder.Constant; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, vector_type.len); defer allocator.free(vals); @@ -3695,9 +3695,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), 
o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, llvm_len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, llvm_len); @@ -3771,9 +3771,9 @@ pub const Object = struct { vals: [Builder.expected_fields_len]Builder.Constant, fields: [Builder.expected_fields_len]Builder.Type, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), o.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), o.gpa); + const allocator = bfa.allocator(); const vals = try allocator.alloc(Builder.Constant, llvm_len); defer allocator.free(vals); const fields = try allocator.alloc(Builder.Type, llvm_len); diff --git a/src/codegen/llvm/FuncGen.zig b/src/codegen/llvm/FuncGen.zig index 50abf43bee..37b0df3d85 100644 --- a/src/codegen/llvm/FuncGen.zig +++ b/src/codegen/llvm/FuncGen.zig @@ -3530,9 +3530,9 @@ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) const inst_llvm_ty = try o.lowerType(inst_ty); const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const scalar_bits = scalar_ty.intInfo(zcu).bits; var smin_big_int: std.math.big.int.Mutable = .{ @@ -3614,9 +3614,9 @@ fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) Allo } if (scalar_ty.isSignedInt(zcu)) { const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(256)]std.math.big.Limb; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator 
= .init(@ptrCast(&stack_buf), self.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const scalar_bits = scalar_ty.intInfo(zcu).bits; var smin_big_int: std.math.big.int.Mutable = .{ diff --git a/src/codegen/riscv64/CodeGen.zig b/src/codegen/riscv64/CodeGen.zig index cd6fcd5153..6b65704a4b 100644 --- a/src/codegen/riscv64/CodeGen.zig +++ b/src/codegen/riscv64/CodeGen.zig @@ -672,11 +672,11 @@ fn restoreState(func: *Func, state: State, deaths: []const Air.Inst.Index, compt const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock; const stack_buf_len = if (opts.update_tracking) 0 else 1; - var stack_buf: [stack_buf_len]ExpectedContents = undefined; - var stack = if (opts.update_tracking) {} else std.heap.StackFallbackAllocator.init(@ptrCast(&stack_buf), func.gpa); + var bfa_buf: [stack_buf_len]ExpectedContents = undefined; + var bfa = if (opts.update_tracking) {} else std.heap.BufferFirstAllocator.init(@ptrCast(&bfa_buf), func.gpa); var reg_locks = if (opts.update_tracking) {} else try std.array_list.Managed(RegisterLock).initCapacity( - stack.allocator(), + bfa.allocator(), @typeInfo(ExpectedContents).array.len, ); defer if (!opts.update_tracking) { @@ -4808,9 +4808,9 @@ fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ExpectedContents = extern struct { vals: [expected_num_args][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), func.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), func.gpa); + const allocator = bfa.allocator(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); 
diff --git a/src/codegen/x86_64/CodeGen.zig b/src/codegen/x86_64/CodeGen.zig index 0affe28a8d..d939047933 100644 --- a/src/codegen/x86_64/CodeGen.zig +++ b/src/codegen/x86_64/CodeGen.zig @@ -173820,9 +173820,9 @@ fn genLazy(cg: *CodeGen, lazy_sym: link.File.LazySymbol) InnerError!void { var err_temp = try cg.tempInit(err_ty, err_mcv); const ExpectedContents = [32]Mir.Inst.Index; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = bfa.allocator(); const relocs = try allocator.alloc(Mir.Inst.Index, error_set_type.names.len); defer allocator.free(relocs); @@ -174220,9 +174220,9 @@ fn restoreState(self: *CodeGen, state: State, deaths: []const Air.Inst.Index, co for (deaths) |death| try self.processDeath(death, .{ .emit_instructions = opts.emit_instructions }); const ExpectedContents = [@typeInfo(RegisterManager.TrackedRegisters).array.len]RegisterLock; - const stack_buf_len = if (opts.update_tracking) 0 else 1; - var stack_buf: [stack_buf_len]ExpectedContents = undefined; - var stack = if (opts.update_tracking) {} else std.heap.StackFallbackAllocator.init(@ptrCast(&stack_buf), self.gpa); + const bfa_buf_len = if (opts.update_tracking) 0 else 1; + var bfa_buf: [bfa_buf_len]ExpectedContents = undefined; + var stack = if (opts.update_tracking) {} else std.heap.BufferFirstAllocator.init(@ptrCast(&bfa_buf), self.gpa); var reg_locks = if (opts.update_tracking) {} else try std.array_list.Managed(RegisterLock).initCapacity( stack.allocator(), @@ -175930,9 +175930,9 @@ fn airCall(self: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif tys: [32][@sizeOf(Type)]u8 align(@alignOf(Type)), vals: [32][@sizeOf(MCValue)]u8 align(@alignOf(MCValue)), }; - var stack_buf: [1]ExpectedContents = undefined; - var stack: 
std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); - const allocator = stack.allocator(); + var bfa_buf: [1]ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const arg_tys = try allocator.alloc(Type, arg_refs.len); defer allocator.free(arg_tys); @@ -175986,9 +175986,9 @@ fn genCall(self: *CodeGen, info: union(enum) { frame_indices: [32]FrameIndex, reg_locks: [32][@sizeOf(?RegisterLock)]u8 align(@alignOf(?RegisterLock)), }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), self.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), self.gpa); + const allocator = bfa.allocator(); const var_args = try allocator.alloc(Type, args.len - fn_info.param_types.len); defer allocator.free(var_args); @@ -176589,9 +176589,9 @@ fn lowerSwitchBr( bigint_limbs: [std.math.big.int.calcTwosCompLimbCount(1 << 10)]std.math.big.Limb, relocs: [1 << 6]Mir.Inst.Index, }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = bfa.allocator(); const state = try cg.saveState(); @@ -181155,9 +181155,9 @@ fn resolveCallingConventionValues( const ExpectedContents = extern struct { param_types: [32][@sizeOf(Type)]u8 align(@alignOf(Type)), }; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = 
bfa.allocator(); const param_types = try allocator.alloc(Type, fn_info.param_types.len + var_args.len); defer allocator.free(param_types); @@ -188707,9 +188707,9 @@ const Select = struct { } const ExpectedContents = [std.math.big.int.calcTwosCompLimbCount(1 << 10)]std.math.big.Limb; - var stack_buf: ExpectedContents = undefined; - var stack: std.heap.StackFallbackAllocator = .init(@ptrCast(&stack_buf), cg.gpa); - const allocator = stack.allocator(); + var bfa_buf: ExpectedContents = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(@ptrCast(&bfa_buf), cg.gpa); + const allocator = bfa.allocator(); var res_big_int: std.math.big.int.Mutable = .{ .limbs = try allocator.alloc( std.math.big.Limb, diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index 209b7bb9da..fe13b20f46 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -2690,9 +2690,9 @@ pub fn ensureUnusedRelocCapacity(elf: *Elf, loc_si: Symbol.Index, len: usize) !v const shndx = loc_si.shndx(elf); const sh = shndx.get(elf); if (sh.rela_si == .null) { - var stack_buf: [32]u8 = undefined; - var stack: std.heap.StackFallbackAllocator = .init(&stack_buf, gpa); - const allocator = stack.allocator(); + var bfa_buf: [32]u8 = undefined; + var bfa: std.heap.BufferFirstAllocator = .init(&bfa_buf, gpa); + const allocator = bfa.allocator(); const rela_name = try std.fmt.allocPrint(allocator, ".rela{s}", .{elf.sectionName(sh.si)});