Merge pull request 'std.heap.ArenaAllocator/std.heap.FixedBufferAllocator: fix end_index memory ordering' (#31647) from justusk/zig:arena-mem-ord into master
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/31647
Reviewed-by: jacobly <jacobly@noreply.codeberg.org>
@@ -207,17 +207,20 @@ pub fn reset(arena: *ArenaAllocator, mode: ResetMode) bool {

/// Concurrent accesses to node pointers generally have to have acquire/release
/// semantics to guarantee that newly allocated nodes are in a valid state when
/// being inserted into a list. Exceptions are possible, e.g. a CAS loop that
/// being inserted into a list. Exceptions are possible, e.g. a cmpxchg loop that
/// never accesses the node returned on failure can use monotonic semantics on
/// failure, but must still use release semantics on success to protect the node
/// it's trying to push.
const Node = struct {
    /// Only meant to be accessed indirectly via the methods supplied by this type,
    /// except if the node is owned by the thread accessing it.
    /// Must always be an even number to accomodate `resize` bit.
    /// Must always be an even number to accommodate `resize` bit.
    size: Size,
    /// Concurrent accesses to `end_index` can be monotonic as long as its value
    /// is compared to a version of `size` before using it to access memory.
    /// Any increase of `end_index` has to use acquire semantics;
    /// any decrease of `end_index` that invalidates (formerly) active allocations
    /// has to use release semantics.
    /// This guarantees that all accesses to memory that's about to be freed
    /// happen-before the free is published.
    /// Since `size` can only grow and never shrink, memory access depending on
    /// any `end_index` <= any `size` can never be OOB.
    end_index: usize,
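The rule in this doc comment pairs a release on any decrease of `end_index` with an acquire on any later increase. A minimal standalone sketch of that pairing (hypothetical `freeLast`/`bumpAlloc` helpers, not the stdlib code):

var buffer: [64]u8 = undefined;
var end_index: usize = 0;

// Thread A: free the most recent `len`-byte allocation.
fn freeLast(len: usize) void {
    const cur = @atomicLoad(usize, &end_index, .monotonic);
    // Release on the decrease: every earlier write into the freed bytes
    // happens-before any thread that acquires the smaller index.
    _ = @cmpxchgStrong(usize, &end_index, cur, cur - len, .release, .monotonic);
}

// Thread B: bump-allocate `len` bytes, possibly reusing the freed region.
fn bumpAlloc(len: usize) ?[]u8 {
    // Acquire on the increase: pairs with the release above, so the freed
    // bytes are safe to overwrite once the smaller index has been observed.
    const start = @atomicRmw(usize, &end_index, .Add, len, .acquire);
    if (start + len > buffer.len) return null;
    return buffer[start..][0..len];
}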
@@ -319,11 +322,6 @@ fn pushFreeList(arena: *ArenaAllocator, first: *Node, last: *Node) void {
    }
}

fn sliceContainsSlice(container: []u8, slice: []u8) bool {
    return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and
        @intFromPtr(slice.ptr + slice.len) <= @intFromPtr(container.ptr + container.len);
}

fn alignedIndex(buf_ptr: [*]u8, end_index: usize, alignment: Alignment) usize {
    // Wrapping arithmetic to avoid overflows since `end_index` isn't bounded by
    // `size`. This is always ok since the max alignment in byte units is also
@@ -357,10 +355,17 @@ fn alloc(ctx: *anyopaque, n: usize, alignment: Alignment, ret_addr: usize) ?[*]u
        // with a single cmpxchg afterwards, which may fail.

        const alignable = n + alignment.toByteUnits() - 1;
        const end_index = @atomicRmw(usize, &node.end_index, .Add, alignable, .monotonic);
        const end_index = @atomicRmw(usize, &node.end_index, .Add, alignable, .acquire); // acquire any memory that may have been freed
        const aligned_index = alignedIndex(buf.ptr, end_index, alignment);
        assert(end_index + alignable >= aligned_index + n);
        _ = @cmpxchgStrong(usize, &node.end_index, end_index + alignable, aligned_index + n, .monotonic, .monotonic);
        _ = @cmpxchgStrong(
            usize,
            &node.end_index,
            end_index + alignable,
            aligned_index + n,
            .monotonic, // no need to release alignment padding; there's no one accessing it!
            .monotonic,
        );

        if (aligned_index + n > buf.len) break :first_node .{ node, buf.len };
        return buf[aligned_index..][0..n].ptr;
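The two-step reservation above (worst-case fetch-add, then a single trim attempt) can be condensed into a hypothetical standalone helper to make the orderings easier to see. The trim stays monotonic because no other thread ever dereferences the alignment padding; a failed bounds check here simply leaks the reservation, whereas the real allocator breaks to its `first_node` label instead:

const std = @import("std");

fn allocAligned(buf: []u8, end_index: *usize, n: usize, alignment: usize) ?[*]u8 {
    // Reserve enough for the worst-case padding in a single atomic step
    // (alignment is assumed to be a power of two).
    const alignable = n + alignment - 1;
    const old_end = @atomicRmw(usize, end_index, .Add, alignable, .acquire);
    const base = @intFromPtr(buf.ptr);
    const aligned_index = std.mem.alignForward(usize, base + old_end, alignment) - base;
    if (aligned_index + n > buf.len) return null;
    // Best-effort trim of the over-reservation; monotonic is enough because
    // nobody ever reads the padding bytes.
    _ = @cmpxchgStrong(usize, end_index, old_end + alignable, aligned_index + n, .monotonic, .monotonic);
    return buf[aligned_index..].ptr;
}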
@@ -382,7 +387,7 @@ fn alloc(ctx: *anyopaque, n: usize, alignment: Alignment, ret_addr: usize) ?[*]u
        const new_size = mem.alignForward(usize, @sizeOf(Node) + aligned_index + n, 2);

        if (new_size <= allocated_slice.len) {
            // a `resize` or `free` call managed to sneak in and we need to
            // A `resize` or `free` call managed to sneak in and we need to
            // guarantee that `size` is only ever increased; retry!
            continue :retry;
        }
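The retry exists because `size` must be monotonically non-decreasing. Stated on its own, the usual shape of such an update is a compare-and-swap loop that only ever moves the value upward (hypothetical `storeMax` helper, not code from this commit):

fn storeMax(size: *usize, new_size: usize) void {
    var cur = @atomicLoad(usize, size, .monotonic);
    // Retry until `size` is at least `new_size`; never move it downward.
    while (cur < new_size) {
        cur = @cmpxchgWeak(usize, size, cur, new_size, .monotonic, .monotonic) orelse return;
    }
}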
@@ -390,14 +395,16 @@ fn alloc(ctx: *anyopaque, n: usize, alignment: Alignment, ret_addr: usize) ?[*]u
        if (arena.child_allocator.rawResize(allocated_slice, .of(Node), new_size, @returnAddress())) {
            size = new_size;

            if (@cmpxchgStrong( // strong because a spurious failure could result in suboptimal usage of this node
            // strong because a spurious failure could result in suboptimal
            // usage of this node
            if (null == @cmpxchgStrong(
                usize,
                &node.end_index,
                end_index,
                aligned_index + n,
                .acquire, // acquire any memory that may have been freed
                .monotonic,
                .monotonic,
            ) == null) {
            )) {
                const new_buf = allocated_slice.ptr[0..new_size][@sizeOf(Node)..];
                return new_buf[aligned_index..][0..n].ptr;
            }
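For readers unfamiliar with the builtin: @cmpxchgStrong returns null when the swap happened and the observed value when it did not, so `null == @cmpxchgStrong(...)` reads as "the swap succeeded", and the requested success ordering (here .acquire) only takes effect in that branch. A minimal illustration with a hypothetical `tryClaim` helper:

fn tryClaim(flag: *usize) bool {
    // Succeeds only if `flag` was 0; the acquire ordering applies on success.
    return null == @cmpxchgStrong(usize, flag, 0, 1, .acquire, .monotonic);
}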
@@ -548,40 +555,48 @@ fn resize(ctx: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, r
    assert(new_len > 0);

    const node = arena.loadFirstNode().?;
    const buf = node.loadBuf();

    if (!sliceContainsSlice(buf, memory)) {
        // Not within current node.
        return new_len <= memory.len;
    }
    const buf_ptr = @as([*]u8, @ptrCast(node)) + @sizeOf(Node);

    const cur_end_index = @atomicLoad(usize, &node.end_index, .monotonic);

    if (buf.ptr + cur_end_index != memory.ptr + memory.len) {
        // It's not the most recent allocation, so it cannot be expanded.
    if (buf_ptr + cur_end_index != memory.ptr + memory.len) {
        // It's not the most recent allocation, so it cannot be expanded,
        // but it's fine if they want to make it smaller.
        return new_len <= memory.len;
    }

    const new_end_index: usize = new_end_index: {
        if (memory.len >= new_len) {
            break :new_end_index cur_end_index - (memory.len - new_len);
        }
        if (buf.len - cur_end_index >= new_len - memory.len) {
            break :new_end_index cur_end_index + (new_len - memory.len);
        }
        return false;
    };
    assert(buf.ptr + new_end_index == memory.ptr + new_len);
    if (new_len <= memory.len) {
        const new_end_index = cur_end_index - (memory.len - new_len);
        assert(buf_ptr + new_end_index == memory.ptr + new_len);

        return null == @cmpxchgStrong(
            usize,
            &node.end_index,
            cur_end_index,
            new_end_index,
            .monotonic,
            .monotonic,
        ) or
            new_len <= memory.len; // Shrinking allocations should always succeed.
        _ = @cmpxchgStrong(
            usize,
            &node.end_index,
            cur_end_index,
            new_end_index,
            .release, // release freed memory
            .monotonic,
        );
        return true; // Shrinking allocations should always succeed.
    }

    // Saturating arithmetic because `end_index` is not guaranteed to be `<= size`.
    // The allocation we're trying to resize *could* belong to a different node!
    if (node.loadBuf().len -| cur_end_index >= new_len - memory.len) {
        const new_end_index = cur_end_index + (new_len - memory.len);
        assert(buf_ptr + new_end_index == memory.ptr + new_len);

        return null == @cmpxchgStrong(
            usize,
            &node.end_index,
            cur_end_index,
            new_end_index,
            .acquire, // acquire any memory that may have been freed
            .monotonic,
        );
    }

    return false;
}

fn remap(ctx: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
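The `-|` in the saturating-arithmetic branch is Zig's saturating subtraction: it clamps at zero instead of wrapping, so a stale `end_index` that is larger than the freshly loaded buffer length cannot produce a bogus, huge remaining-space value. A small self-contained check (hypothetical values):

const std = @import("std");

test "saturating subtraction clamps at zero" {
    const buf_len: usize = 100;
    const stale_end_index: usize = 150; // e.g. reserved past the end, or taken from another node
    try std.testing.expectEqual(@as(usize, 0), buf_len -| stale_end_index);
}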
@@ -596,29 +611,24 @@ fn free(ctx: *anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) vo
    assert(memory.len > 0);

    const node = arena.loadFirstNode().?;
    const buf = node.loadBuf();

    if (!sliceContainsSlice(buf, memory)) {
        // Not within current node; we cannot free it.
        return;
    }
    const buf_ptr = @as([*]u8, @ptrCast(node)) + @sizeOf(Node);

    const cur_end_index = @atomicLoad(usize, &node.end_index, .monotonic);

    if (buf.ptr + cur_end_index != memory.ptr + memory.len) {
    if (buf_ptr + cur_end_index != memory.ptr + memory.len) {
        // Not the most recent allocation; we cannot free it.
        return;
    }

    const new_end_index = cur_end_index - memory.len;
    assert(buf.ptr + new_end_index == memory.ptr);
    assert(buf_ptr + new_end_index == memory.ptr);

    _ = @cmpxchgStrong(
        usize,
        &node.end_index,
        cur_end_index,
        new_end_index,
        .monotonic,
        .release, // release freed memory
        .monotonic,
    );
}
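The early returns above are what makes arena free best-effort: a given free call only reclaims the most recent allocation, everything else is retained until reset or deinit. A small usage sketch of that behaviour (assuming a test block with std.testing.allocator):

const std = @import("std");

test "arena free only reclaims the most recent allocation" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    const allocator = arena.allocator();

    const first = try allocator.alloc(u8, 16);
    const second = try allocator.alloc(u8, 16);

    allocator.free(first); // not the most recent allocation: a no-op, the bytes stay in use
    allocator.free(second); // most recent: `end_index` rolls back and the bytes can be reused
}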
@@ -137,7 +137,14 @@ fn threadSafeAlloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ret_addr
        const adjusted_index = cur_end_index + adjust_off;
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) return null;
        cur_end_index = @cmpxchgWeak(usize, &self.end_index, cur_end_index, new_end_index, .monotonic, .monotonic) orelse
        cur_end_index = @cmpxchgWeak(
            usize,
            &self.end_index,
            cur_end_index,
            new_end_index,
            .acquire, // acquire any memory that may have been freed
            .monotonic,
        ) orelse
            return self.buffer[adjusted_index..new_end_index].ptr;
    }
}
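The surrounding loop uses @cmpxchgWeak, which may fail spuriously; that is harmless inside a retry loop, and with this change the success ordering is acquire so memory released by a concurrent free is safe to hand out again. The bare retry-loop shape, as a hypothetical standalone bump helper:

fn threadSafeBump(buffer: []u8, end_index: *usize, n: usize) ?[*]u8 {
    var cur = @atomicLoad(usize, end_index, .monotonic);
    while (true) {
        const new_end = cur + n;
        if (new_end > buffer.len) return null;
        // On success (null) return the reserved region; on failure `cur` is
        // refreshed with the observed value and the loop tries again.
        cur = @cmpxchgWeak(usize, end_index, cur, new_end, .acquire, .monotonic) orelse
            return buffer[cur..new_end].ptr;
    }
}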
@@ -154,26 +161,36 @@ fn threadSafeResize(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, new
        return new_len <= memory.len;
    }

    const new_end_index: usize = new_end_index: {
        if (memory.len >= new_len) {
            break :new_end_index cur_end_index - (memory.len - new_len);
        }
        if (fba.buffer.len - cur_end_index >= new_len - memory.len) {
            break :new_end_index cur_end_index + (new_len - memory.len);
        }
        return false;
    };
    assert(fba.buffer.ptr + new_end_index == memory.ptr + new_len);
    if (new_len <= memory.len) {
        const new_end_index = cur_end_index - (memory.len - new_len);
        assert(fba.buffer.ptr + new_end_index == memory.ptr + new_len);

        return null == @cmpxchgStrong(
            usize,
            &fba.end_index,
            cur_end_index,
            new_end_index,
            .monotonic,
            .monotonic,
        ) or
            new_len <= memory.len; // Shrinking allocations should always succeed.
        _ = @cmpxchgStrong(
            usize,
            &fba.end_index,
            cur_end_index,
            new_end_index,
            .release, // release freed memory
            .monotonic,
        );
        return true; // Shrinking allocations should always succeed.
    }

    if (fba.buffer.len - cur_end_index >= new_len - memory.len) {
        const new_end_index = cur_end_index + (new_len - memory.len);
        assert(fba.buffer.ptr + new_end_index == memory.ptr + new_len);

        return null == @cmpxchgStrong(
            usize,
            &fba.end_index,
            cur_end_index,
            new_end_index,
            .acquire, // acquire any memory that may have been freed
            .monotonic,
        );
    }

    return false;
}

fn threadSafeRemap(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
@@ -201,7 +218,7 @@ fn threadSafeFree(ctx: *anyopaque, memory: []u8, alignment: mem.Alignment, ret_a
        &fba.end_index,
        cur_end_index,
        new_end_index,
        .monotonic,
        .release, // release freed memory
        .monotonic,
    );
}
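For context, these thread-safe paths are the ones reached through FixedBufferAllocator.threadSafeAllocator(); the plain allocator() interface is not synchronized. A small usage sketch:

const std = @import("std");

test "thread-safe FixedBufferAllocator interface" {
    var backing: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&backing);
    // alloc/resize/free on this interface go through the atomic
    // `end_index` paths patched above.
    const allocator = fba.threadSafeAllocator();

    const buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
}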