std: remove managed array hash map variants

And deprecate all the API names except for:
* `std.array_hash_map.Auto`
* `std.array_hash_map.String`
* `std.array_hash_map.Custom`
This commit is contained in:
Andrew Kelley
2026-04-08 16:02:19 -07:00
parent 0606af509f
commit c3a862522b
26 changed files with 271 additions and 725 deletions
+12 -12
View File
@@ -86,10 +86,10 @@ libc_runtimes_dir: ?[]const u8 = null,
dep_prefix: []const u8 = "",
modules: std.StringArrayHashMap(*Module),
modules: std.array_hash_map.String(*Module),
named_writefiles: std.StringArrayHashMap(*Step.WriteFile),
named_lazy_paths: std.StringArrayHashMap(LazyPath),
named_writefiles: std.array_hash_map.String(*Step.WriteFile),
named_lazy_paths: std.array_hash_map.String(LazyPath),
/// The hash of this instance's package. `""` means that this is the root package.
pkg_hash: []const u8,
/// A mapping from dependency names to package hashes.
@@ -312,9 +312,9 @@ pub fn create(
},
.install_path = undefined,
.args = null,
.modules = .init(arena),
.named_writefiles = .init(arena),
.named_lazy_paths = .init(arena),
.modules = .empty,
.named_writefiles = .empty,
.named_lazy_paths = .empty,
.pkg_hash = "",
.available_deps = available_deps,
.release_mode = .off,
@@ -405,9 +405,9 @@ fn createChildOnly(
.enable_wine = parent.enable_wine,
.libc_runtimes_dir = parent.libc_runtimes_dir,
.dep_prefix = parent.fmt("{s}{s}.", .{ parent.dep_prefix, dep_name }),
.modules = .init(allocator),
.named_writefiles = .init(allocator),
.named_lazy_paths = .init(allocator),
.modules = .empty,
.named_writefiles = .empty,
.named_lazy_paths = .empty,
.pkg_hash = pkg_hash,
.available_deps = pkg_deps,
.release_mode = parent.release_mode,
@@ -908,7 +908,7 @@ pub const AssemblyOptions = struct {
/// `createModule` can be used instead to create a private module.
pub fn addModule(b: *Build, name: []const u8, options: Module.CreateOptions) *Module {
const module = Module.create(b, options);
b.modules.put(b.dupe(name), module) catch @panic("OOM");
b.modules.put(b.graph.arena, b.dupe(name), module) catch @panic("OOM");
return module;
}
@@ -1056,12 +1056,12 @@ pub fn addWriteFile(b: *Build, file_path: []const u8, data: []const u8) *Step.Wr
pub fn addNamedWriteFiles(b: *Build, name: []const u8) *Step.WriteFile {
const wf = Step.WriteFile.create(b);
b.named_writefiles.put(b.dupe(name), wf) catch @panic("OOM");
b.named_writefiles.put(b.graph.arena, b.dupe(name), wf) catch @panic("OOM");
return wf;
}
pub fn addNamedLazyPath(b: *Build, name: []const u8, lp: LazyPath) void {
b.named_lazy_paths.put(b.dupe(name), lp.dupe(b)) catch @panic("OOM");
b.named_lazy_paths.put(b.graph.arena, b.dupe(name), lp.dupe(b)) catch @panic("OOM");
}
/// Creates a step for mutating files inside a temporary directory created lazily
+3 -3
View File
@@ -1814,16 +1814,16 @@ const ElfDumper = struct {
files.putAssumeCapacityNoClobber(object.off - @sizeOf(elf.ar_hdr), object.name);
}
var symbols = std.AutoArrayHashMap(usize, std.array_list.Managed([]const u8)).init(ctx.gpa);
var symbols: std.array_hash_map.Auto(usize, std.array_list.Managed([]const u8)) = .empty;
defer {
for (symbols.values()) |*value| {
value.deinit();
}
symbols.deinit();
symbols.deinit(ctx.gpa);
}
for (ctx.symtab.items) |entry| {
const gop = try symbols.getOrPut(@intCast(entry.off));
const gop = try symbols.getOrPut(ctx.gpa, @intCast(entry.off));
if (!gop.found_existing) {
gop.value_ptr.* = std.array_list.Managed([]const u8).init(ctx.gpa);
}
+34 -32
View File
@@ -38,7 +38,7 @@ pub const Value = union(enum) {
};
step: Step,
values: std.StringArrayHashMap(Value),
values: std.array_hash_map.String(Value),
/// This directory contains the generated file under the name `include_path`.
generated_dir: std.Build.GeneratedFile,
@@ -95,7 +95,7 @@ pub fn create(owner: *std.Build, options: Options) *ConfigHeader {
.first_ret_addr = options.first_ret_addr orelse @returnAddress(),
}),
.style = options.style,
.values = .init(owner.allocator),
.values = .empty,
.max_bytes = options.max_bytes,
.include_path = include_path,
@@ -110,7 +110,8 @@ pub fn create(owner: *std.Build, options: Options) *ConfigHeader {
}
pub fn addIdent(config_header: *ConfigHeader, name: []const u8, value: []const u8) void {
config_header.values.put(name, .{ .ident = value }) catch @panic("OOM");
const arena = config_header.step.owner.allocator;
config_header.values.put(arena, name, .{ .ident = value }) catch @panic("OOM");
}
pub fn addValue(config_header: *ConfigHeader, name: []const u8, comptime T: type, value: T) void {
@@ -131,43 +132,44 @@ pub fn getOutputFile(ch: *ConfigHeader) std.Build.LazyPath {
}
fn addValueInner(config_header: *ConfigHeader, name: []const u8, comptime T: type, value: T) !void {
const arena = config_header.step.owner.allocator;
switch (@typeInfo(T)) {
.null => {
try config_header.values.put(name, .undef);
try config_header.values.put(arena, name, .undef);
},
.void => {
try config_header.values.put(name, .defined);
try config_header.values.put(arena, name, .defined);
},
.bool => {
try config_header.values.put(name, .{ .boolean = value });
try config_header.values.put(arena, name, .{ .boolean = value });
},
.int => {
try config_header.values.put(name, .{ .int = value });
try config_header.values.put(arena, name, .{ .int = value });
},
.comptime_int => {
try config_header.values.put(name, .{ .int = value });
try config_header.values.put(arena, name, .{ .int = value });
},
.@"enum", .enum_literal => {
try config_header.values.put(name, .{ .ident = @tagName(value) });
try config_header.values.put(arena, name, .{ .ident = @tagName(value) });
},
.optional => {
if (value) |x| {
return addValueInner(config_header, name, @TypeOf(x), x);
} else {
try config_header.values.put(name, .undef);
try config_header.values.put(arena, name, .undef);
}
},
.pointer => |ptr| {
switch (@typeInfo(ptr.child)) {
.array => |array| {
if (ptr.size == .one and array.child == u8) {
try config_header.values.put(name, .{ .string = value });
try config_header.values.put(arena, name, .{ .string = value });
return;
}
},
.int => {
if (ptr.size == .slice and ptr.child == u8) {
try config_header.values.put(name, .{ .string = value });
try config_header.values.put(arena, name, .{ .string = value });
return;
}
},
@@ -218,8 +220,8 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
});
};
switch (config_header.style) {
.autoconf_undef => try render_autoconf_undef(step, contents, bw, config_header.values, src_path),
.autoconf_at => try render_autoconf_at(step, contents, &aw, config_header.values, src_path),
.autoconf_undef => try render_autoconf_undef(step, contents, bw, &config_header.values, src_path),
.autoconf_at => try render_autoconf_at(step, contents, &aw, &config_header.values, src_path),
else => unreachable,
}
},
@@ -282,7 +284,7 @@ fn render_autoconf_undef(
step: *Step,
contents: []const u8,
bw: *Writer,
values: std.StringArrayHashMap(Value),
values: *const std.array_hash_map.String(Value),
src_path: []const u8,
) !void {
const build = step.owner;
@@ -334,7 +336,7 @@ fn render_autoconf_at(
step: *Step,
contents: []const u8,
aw: *Writer.Allocating,
values: std.StringArrayHashMap(Value),
values: *const std.array_hash_map.String(Value),
src_path: []const u8,
) !void {
const build = step.owner;
@@ -373,7 +375,7 @@ fn render_autoconf_at(
if (!last_line) try bw.writeByte('\n');
}
for (values.unmanaged.entries.slice().items(.key), used) |name, u| {
for (values.entries.slice().items(.key), used) |name, u| {
if (!u) {
try step.addError("{s}: error: config header value unused: '{s}'", .{ src_path, name });
any_errors = true;
@@ -387,14 +389,14 @@ fn render_cmake(
step: *Step,
contents: []const u8,
bw: *Writer,
values: std.StringArrayHashMap(Value),
values: std.array_hash_map.String(Value),
src_path: []const u8,
) !void {
const build = step.owner;
const allocator = build.allocator;
var values_copy = try values.clone();
defer values_copy.deinit();
var values_copy = try values.clone(allocator);
defer values_copy.deinit(allocator);
var any_errors = false;
var line_index: u32 = 0;
@@ -523,7 +525,7 @@ fn render_cmake(
fn render_blank(
gpa: std.mem.Allocator,
bw: *Writer,
defines: std.StringArrayHashMap(Value),
defines: std.array_hash_map.String(Value),
include_path: []const u8,
include_guard_override: ?[]const u8,
) !void {
@@ -555,7 +557,7 @@ fn render_blank(
, .{include_guard_name});
}
fn render_nasm(bw: *Writer, defines: std.StringArrayHashMap(Value)) !void {
fn render_nasm(bw: *Writer, defines: std.array_hash_map.String(Value)) !void {
for (defines.keys(), defines.values()) |name, value| try renderValueNasm(bw, name, value);
}
@@ -586,7 +588,7 @@ fn renderValueNasm(bw: *Writer, name: []const u8, value: Value) !void {
fn expand_variables_autoconf_at(
bw: *Writer,
contents: []const u8,
values: std.StringArrayHashMap(Value),
values: *const std.array_hash_map.String(Value),
used: []bool,
) !void {
const valid_varname_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_";
@@ -612,7 +614,7 @@ fn expand_variables_autoconf_at(
try bw.writeAll(key);
return error.MissingValue;
};
const value = values.unmanaged.entries.slice().items(.value)[index];
const value = values.entries.slice().items(.value)[index];
used[index] = true;
try bw.writeAll(contents[source_offset..curr]);
switch (value) {
@@ -633,7 +635,7 @@ fn expand_variables_autoconf_at(
fn expand_variables_cmake(
allocator: Allocator,
contents: []const u8,
values: std.StringArrayHashMap(Value),
values: std.array_hash_map.String(Value),
) ![]const u8 {
var result: std.array_list.Managed(u8) = .init(allocator);
errdefer result.deinit();
@@ -765,7 +767,7 @@ fn testReplaceVariablesAutoconfAt(
allocator: Allocator,
contents: []const u8,
expected: []const u8,
values: std.StringArrayHashMap(Value),
values: std.array_hash_map.String(Value),
) !void {
var aw: Writer.Allocating = .init(allocator);
defer aw.deinit();
@@ -784,7 +786,7 @@ fn testReplaceVariablesCMake(
allocator: Allocator,
contents: []const u8,
expected: []const u8,
values: std.StringArrayHashMap(Value),
values: std.array_hash_map.String(Value),
) !void {
const actual = try expand_variables_cmake(allocator, contents, values);
defer allocator.free(actual);
@@ -794,7 +796,7 @@ fn testReplaceVariablesCMake(
test "expand_variables_autoconf_at simple cases" {
const allocator = std.testing.allocator;
var values: std.StringArrayHashMap(Value) = .init(allocator);
var values: std.array_hash_map.String(Value) = .init(allocator);
defer values.deinit();
// empty strings are preserved
@@ -890,7 +892,7 @@ test "expand_variables_autoconf_at simple cases" {
test "expand_variables_autoconf_at edge cases" {
const allocator = std.testing.allocator;
var values: std.StringArrayHashMap(Value) = .init(allocator);
var values: std.array_hash_map.String(Value) = .init(allocator);
defer values.deinit();
// @-vars resolved only when they wrap valid characters, otherwise considered literals
@@ -906,7 +908,7 @@ test "expand_variables_autoconf_at edge cases" {
test "expand_variables_cmake simple cases" {
const allocator = std.testing.allocator;
var values: std.StringArrayHashMap(Value) = .init(allocator);
var values: std.array_hash_map.String(Value) = .init(allocator);
defer values.deinit();
try values.putNoClobber("undef", .undef);
@@ -994,7 +996,7 @@ test "expand_variables_cmake simple cases" {
test "expand_variables_cmake edge cases" {
const allocator = std.testing.allocator;
var values: std.StringArrayHashMap(Value) = .init(allocator);
var values: std.array_hash_map.String(Value) = .init(allocator);
defer values.deinit();
// special symbols
@@ -1055,7 +1057,7 @@ test "expand_variables_cmake edge cases" {
test "expand_variables_cmake escaped characters" {
const allocator = std.testing.allocator;
var values: std.StringArrayHashMap(Value) = .init(allocator);
var values: std.array_hash_map.String(Value) = .init(allocator);
defer values.deinit();
try values.putNoClobber("string", Value{ .string = "text" });
+96 -555
View File
@@ -12,27 +12,15 @@ const hash_map = @This();
/// An `ArrayHashMap` with default hash and equal functions.
///
/// See `AutoContext` for a description of the hash and equal implementations.
pub fn AutoArrayHashMap(comptime K: type, comptime V: type) type {
pub fn Auto(comptime K: type, comptime V: type) type {
return ArrayHashMap(K, V, AutoContext(K), !autoEqlIsCheap(K));
}
/// An `ArrayHashMapUnmanaged` with default hash and equal functions.
///
/// See `AutoContext` for a description of the hash and equal implementations.
pub fn AutoArrayHashMapUnmanaged(comptime K: type, comptime V: type) type {
return ArrayHashMapUnmanaged(K, V, AutoContext(K), !autoEqlIsCheap(K));
}
/// An `ArrayHashMap` with strings as keys.
pub fn StringArrayHashMap(comptime V: type) type {
pub fn String(comptime V: type) type {
return ArrayHashMap([]const u8, V, StringContext, true);
}
/// An `ArrayHashMapUnmanaged` with strings as keys.
pub fn StringArrayHashMapUnmanaged(comptime V: type) type {
return ArrayHashMapUnmanaged([]const u8, V, StringContext, true);
}
pub const StringContext = struct {
pub fn hash(self: @This(), s: []const u8) u32 {
_ = self;
@@ -53,454 +41,8 @@ pub fn hashString(s: []const u8) u32 {
return @truncate(std.hash.Wyhash.hash(0, s));
}
/// Deprecated in favor of `ArrayHashMapWithAllocator` (no code changes needed)
/// or `ArrayHashMapUnmanaged` (will need to update callsites to pass an
/// allocator). After Zig 0.14.0 is released, `ArrayHashMapWithAllocator` will
/// be removed and `ArrayHashMapUnmanaged` will be a deprecated alias. After
/// Zig 0.15.0 is released, the deprecated alias `ArrayHashMapUnmanaged` will
/// be removed.
pub const ArrayHashMap = ArrayHashMapWithAllocator;
/// A hash table of keys and values, each stored sequentially.
///
/// Insertion order is preserved. In general, this data structure supports the same
/// operations as `std.ArrayList`.
///
/// Deletion operations:
/// * `swapRemove` - O(1)
/// * `orderedRemove` - O(N)
///
/// Modifying the hash map while iterating is allowed, however, one must understand
/// the (well defined) behavior when mixing insertions and deletions with iteration.
///
/// See `ArrayHashMapUnmanaged` for a variant of this data structure that accepts an
/// `Allocator` as a parameter when needed rather than storing it.
pub fn ArrayHashMapWithAllocator(
comptime K: type,
comptime V: type,
/// A namespace that provides these two functions:
/// * `pub fn hash(self, K) u32`
/// * `pub fn eql(self, K, K, usize) bool`
///
/// The final `usize` in the `eql` function represents the index of the key
/// that's already inside the map.
comptime Context: type,
/// When `false`, this data structure is biased towards cheap `eql`
/// functions and avoids storing each key's hash in the table. Setting
/// `store_hash` to `true` incurs more memory cost but limits `eql` to
/// being called only once per insertion/deletion (provided there are no
/// hash collisions).
comptime store_hash: bool,
) type {
return struct {
unmanaged: Unmanaged,
allocator: Allocator,
ctx: Context,
/// The ArrayHashMapUnmanaged type using the same settings as this managed map.
pub const Unmanaged = ArrayHashMapUnmanaged(K, V, Context, store_hash);
/// Pointers to a key and value in the backing store of this map.
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const Entry = Unmanaged.Entry;
/// A KV pair which has been copied out of the backing store
pub const KV = Unmanaged.KV;
/// The Data type used for the MultiArrayList backing this map
pub const Data = Unmanaged.Data;
/// The MultiArrayList type backing this map
pub const DataList = Unmanaged.DataList;
/// The stored hash type, either u32 or void.
pub const Hash = Unmanaged.Hash;
/// getOrPut variants return this structure, with pointers
/// to the backing store and a flag to indicate whether an
/// existing entry was found.
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureTotalCapacity`/`ensureUnusedCapacity` was previously used.
pub const GetOrPutResult = Unmanaged.GetOrPutResult;
/// An Iterator over Entry pointers.
pub const Iterator = Unmanaged.Iterator;
const Self = @This();
/// Create an ArrayHashMap instance which will use a specified allocator.
pub fn init(allocator: Allocator) Self {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead.");
return initContext(allocator, undefined);
}
pub fn initContext(allocator: Allocator, ctx: Context) Self {
return .{
.unmanaged = .empty,
.allocator = allocator,
.ctx = ctx,
};
}
/// Frees the backing allocation and leaves the map in an undefined state.
/// Note that this does not free keys or values. You must take care of that
/// before calling this function, if it is needed.
pub fn deinit(self: *Self) void {
self.unmanaged.deinit(self.allocator);
self.* = undefined;
}
/// Puts the hash map into a state where any method call that would
/// cause an existing key or value pointer to become invalidated will
/// instead trigger an assertion.
///
/// An additional call to `lockPointers` in such state also triggers an
/// assertion.
///
/// `unlockPointers` returns the hash map to the previous state.
pub fn lockPointers(self: *Self) void {
self.unmanaged.lockPointers();
}
/// Undoes a call to `lockPointers`.
pub fn unlockPointers(self: *Self) void {
self.unmanaged.unlockPointers();
}
/// Clears the map but retains the backing allocation for future use.
pub fn clearRetainingCapacity(self: *Self) void {
return self.unmanaged.clearRetainingCapacity();
}
/// Clears the map and releases the backing allocation
pub fn clearAndFree(self: *Self) void {
return self.unmanaged.clearAndFree(self.allocator);
}
/// Returns the number of KV pairs stored in this map.
pub fn count(self: Self) usize {
return self.unmanaged.count();
}
/// Returns the backing array of keys in this map. Modifying the map may
/// invalidate this array. Modifying this array in a way that changes
/// key hashes or key equality puts the map into an unusable state until
/// `reIndex` is called.
pub fn keys(self: Self) []K {
return self.unmanaged.keys();
}
/// Returns the backing array of values in this map. Modifying the map
/// may invalidate this array. It is permitted to modify the values in
/// this array.
pub fn values(self: Self) []V {
return self.unmanaged.values();
}
/// Returns an iterator over the pairs in this map.
/// Modifying the map may invalidate this iterator.
pub fn iterator(self: *const Self) Iterator {
return self.unmanaged.iterator();
}
/// If key exists this function cannot fail.
/// If there is an existing item with `key`, then the result
/// `Entry` pointer points to it, and found_existing is true.
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointer points to it. Caller should then initialize
/// the value (but not the key).
pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx);
}
pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) !GetOrPutResult {
return self.unmanaged.getOrPutContextAdapted(self.allocator, key, ctx, self.ctx);
}
/// If there is an existing item with `key`, then the result
/// `Entry` pointer points to it, and found_existing is true.
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointer points to it. Caller should then initialize
/// the value (but not the key).
/// If a new entry needs to be stored, this function asserts there
/// is enough capacity to store it.
pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx);
}
pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
}
pub fn getOrPutValue(self: *Self, key: K, value: V) !GetOrPutResult {
return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx);
}
/// Increases capacity, guaranteeing that insertions up until the
/// `expected_count` will not cause an allocation, and therefore cannot fail.
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
return self.unmanaged.ensureTotalCapacityContext(self.allocator, new_capacity, self.ctx);
}
/// Increases capacity, guaranteeing that insertions up until
/// `additional_count` **more** items will not cause an allocation, and
/// therefore cannot fail.
pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
return self.unmanaged.ensureUnusedCapacityContext(self.allocator, additional_count, self.ctx);
}
/// Returns the number of total elements which may be present before it is
/// no longer guaranteed that no allocations will be performed.
pub fn capacity(self: Self) usize {
return self.unmanaged.capacity();
}
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPut`.
pub fn put(self: *Self, key: K, value: V) !void {
return self.unmanaged.putContext(self.allocator, key, value, self.ctx);
}
/// Inserts a key-value pair into the hash map, asserting that no previous
/// entry with the same key is already present
pub fn putNoClobber(self: *Self, key: K, value: V) !void {
return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx);
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx);
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Asserts that it does not clobber any existing data.
/// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx);
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
pub fn fetchPut(self: *Self, key: K, value: V) !?KV {
return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx);
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
/// If insertion happens, asserts there is enough capacity without allocating.
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx);
}
/// Finds pointers to the key and value storage associated with a key.
pub fn getEntry(self: Self, key: K) ?Entry {
return self.unmanaged.getEntryContext(key, self.ctx);
}
pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
return self.unmanaged.getEntryAdapted(key, ctx);
}
/// Finds the index in the `entries` array where a key is stored
pub fn getIndex(self: Self, key: K) ?usize {
return self.unmanaged.getIndexContext(key, self.ctx);
}
pub fn getIndexAdapted(self: Self, key: anytype, ctx: anytype) ?usize {
return self.unmanaged.getIndexAdapted(key, ctx);
}
/// Find the value associated with a key
pub fn get(self: Self, key: K) ?V {
return self.unmanaged.getContext(key, self.ctx);
}
pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
return self.unmanaged.getAdapted(key, ctx);
}
/// Find a pointer to the value associated with a key
pub fn getPtr(self: Self, key: K) ?*V {
return self.unmanaged.getPtrContext(key, self.ctx);
}
pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
return self.unmanaged.getPtrAdapted(key, ctx);
}
/// Find the actual key associated with an adapted key
pub fn getKey(self: Self, key: K) ?K {
return self.unmanaged.getKeyContext(key, self.ctx);
}
pub fn getKeyAdapted(self: Self, key: anytype, ctx: anytype) ?K {
return self.unmanaged.getKeyAdapted(key, ctx);
}
/// Find a pointer to the actual key associated with an adapted key
pub fn getKeyPtr(self: Self, key: K) ?*K {
return self.unmanaged.getKeyPtrContext(key, self.ctx);
}
pub fn getKeyPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*K {
return self.unmanaged.getKeyPtrAdapted(key, ctx);
}
/// Check whether a key is stored in the map
pub fn contains(self: Self, key: K) bool {
return self.unmanaged.containsContext(key, self.ctx);
}
pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.containsAdapted(key, ctx);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and then returned from this function. The entry is
/// removed from the underlying array by swapping it with the last
/// element.
pub fn fetchSwapRemove(self: *Self, key: K) ?KV {
return self.unmanaged.fetchSwapRemoveContext(key, self.ctx);
}
pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
return self.unmanaged.fetchSwapRemoveContextAdapted(key, ctx, self.ctx);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and then returned from this function. The entry is
/// removed from the underlying array by shifting all elements forward
/// thereby maintaining the current ordering.
pub fn fetchOrderedRemove(self: *Self, key: K) ?KV {
return self.unmanaged.fetchOrderedRemoveContext(key, self.ctx);
}
pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
return self.unmanaged.fetchOrderedRemoveContextAdapted(key, ctx, self.ctx);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map. The entry is removed from the underlying array
/// by swapping it with the last element. Returns true if an entry
/// was removed, false otherwise.
pub fn swapRemove(self: *Self, key: K) bool {
return self.unmanaged.swapRemoveContext(key, self.ctx);
}
pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.swapRemoveContextAdapted(key, ctx, self.ctx);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map. The entry is removed from the underlying array
/// by shifting all elements forward, thereby maintaining the
/// current ordering. Returns true if an entry was removed, false otherwise.
pub fn orderedRemove(self: *Self, key: K) bool {
return self.unmanaged.orderedRemoveContext(key, self.ctx);
}
pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.orderedRemoveContextAdapted(key, ctx, self.ctx);
}
/// Deletes the item at the specified index in `entries` from
/// the hash map. The entry is removed from the underlying array
/// by swapping it with the last element.
pub fn swapRemoveAt(self: *Self, index: usize) void {
self.unmanaged.swapRemoveAtContext(index, self.ctx);
}
/// Deletes the item at the specified index in `entries` from
/// the hash map. The entry is removed from the underlying array
/// by shifting all elements forward, thereby maintaining the
/// current ordering.
pub fn orderedRemoveAt(self: *Self, index: usize) void {
self.unmanaged.orderedRemoveAtContext(index, self.ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context and allocator as this instance.
pub fn clone(self: Self) !Self {
var other = try self.unmanaged.cloneContext(self.allocator, self.ctx);
return other.promoteContext(self.allocator, self.ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context as this instance, but the specified
/// allocator.
pub fn cloneWithAllocator(self: Self, allocator: Allocator) !Self {
var other = try self.unmanaged.cloneContext(allocator, self.ctx);
return other.promoteContext(allocator, self.ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same allocator as this instance, but the
/// specified context.
pub fn cloneWithContext(self: Self, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
var other = try self.unmanaged.cloneContext(self.allocator, ctx);
return other.promoteContext(self.allocator, ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the specified allocator and context.
pub fn cloneWithAllocatorAndContext(self: Self, allocator: Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
var other = try self.unmanaged.cloneContext(allocator, ctx);
return other.promoteContext(allocator, ctx);
}
/// Set the map to an empty state, making deinitialization a no-op, and
/// returning a copy of the original.
pub fn move(self: *Self) Self {
self.unmanaged.pointer_stability.assertUnlocked();
const result = self.*;
self.unmanaged = .empty;
return result;
}
/// Recomputes stored hashes and rebuilds the key indexes. If the
/// underlying keys have been modified directly, call this method to
/// recompute the denormalized metadata necessary for the operation of
/// the methods of this map that lookup entries by key.
///
/// One use case for this is directly calling `entries.resize()` to grow
/// the underlying storage, and then setting the `keys` and `values`
/// directly without going through the methods of this map.
///
/// The time complexity of this operation is O(n).
pub fn reIndex(self: *Self) !void {
return self.unmanaged.reIndexContext(self.allocator, self.ctx);
}
/// Sorts the entries and then rebuilds the index.
/// `sort_ctx` must have this method:
/// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
/// Uses a stable sorting algorithm.
pub fn sort(self: *Self, sort_ctx: anytype) void {
return self.unmanaged.sortContext(sort_ctx, self.ctx);
}
/// Sorts the entries and then rebuilds the index.
/// `sort_ctx` must have this method:
/// `fn lessThan(ctx: @TypeOf(ctx), a_index: usize, b_index: usize) bool`
/// Uses an unstable sorting algorithm.
pub fn sortUnstable(self: *Self, sort_ctx: anytype) void {
return self.unmanaged.sortUnstableContext(sort_ctx, self.ctx);
}
/// Shrinks the underlying `Entry` array to `new_len` elements and
/// discards any associated index entries. Keeps capacity the same.
///
/// Asserts the discarded entries remain initialized and capable of
/// performing hash and equality checks. Any deinitialization of
/// discarded entries must take place *after* calling this function.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
return self.unmanaged.shrinkRetainingCapacityContext(new_len, self.ctx);
}
/// Shrinks the underlying `Entry` array to `new_len` elements and
/// discards any associated index entries. Reduces allocated capacity.
///
/// Asserts the discarded entries remain initialized and capable of
/// performing hash and equality checks. It is a bug to call this
/// function if the discarded entries require deinitialization. For
/// that use case, `shrinkRetainingCapacity` can be used instead.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
return self.unmanaged.shrinkAndFreeContext(self.allocator, new_len, self.ctx);
}
/// Removes the last inserted `Entry` in the hash map and returns it if count is nonzero.
/// Otherwise returns null.
pub fn pop(self: *Self) ?KV {
return self.unmanaged.popContext(self.ctx);
}
};
}
/// Deprecated; use `Custom`.
pub const ArrayHashMap = Custom;
/// A hash table of keys and values, each stored sequentially.
///
@@ -522,11 +64,11 @@ pub fn ArrayHashMapWithAllocator(
///
/// This type is designed to have low overhead for small numbers of entries. When
/// `store_hash` is `false` and the number of entries in the map is less than 9,
/// the overhead cost of using `ArrayHashMapUnmanaged` rather than `std.ArrayList` is
/// the overhead cost of using `ArrayHashMap` rather than `std.ArrayList` is
/// only a single pointer-sized integer.
///
/// Default initialization of this struct is deprecated; use `.empty` instead.
pub fn ArrayHashMapUnmanaged(
pub fn Custom(
comptime K: type,
comptime V: type,
/// A namespace that provides these two functions:
@@ -605,9 +147,6 @@ pub fn ArrayHashMapUnmanaged(
index: usize,
};
/// The ArrayHashMap type using the same settings as this managed map.
pub const Managed = ArrayHashMap(K, V, Context, store_hash);
/// Some functions require a context only if hashes are not stored.
/// To keep the api simple, this type is only used internally.
const ByIndexContext = if (store_hash) void else Context;
@@ -626,21 +165,6 @@ pub fn ArrayHashMapUnmanaged(
const Oom = Allocator.Error;
/// Convert from an unmanaged map to a managed map. After calling this,
/// the promoted map should no longer be used.
pub fn promote(self: Self, gpa: Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
return self.promoteContext(gpa, undefined);
}
pub fn promoteContext(self: Self, gpa: Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = gpa,
.ctx = ctx,
};
}
pub fn init(gpa: Allocator, key_list: []const K, value_list: []const V) Oom!Self {
var self: Self = .{};
errdefer self.deinit(gpa);
@@ -2189,35 +1713,37 @@ const IndexHeader = struct {
};
test "basic hash map usage" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
try testing.expect((try map.fetchPut(1, 11)) == null);
try testing.expect((try map.fetchPut(2, 22)) == null);
try testing.expect((try map.fetchPut(3, 33)) == null);
try testing.expect((try map.fetchPut(4, 44)) == null);
var map: Auto(i32, i32) = .empty;
defer map.deinit(gpa);
try map.putNoClobber(5, 55);
try testing.expect((try map.fetchPut(5, 66)).?.value == 55);
try testing.expect((try map.fetchPut(5, 55)).?.value == 66);
try testing.expect((try map.fetchPut(gpa, 1, 11)) == null);
try testing.expect((try map.fetchPut(gpa, 2, 22)) == null);
try testing.expect((try map.fetchPut(gpa, 3, 33)) == null);
try testing.expect((try map.fetchPut(gpa, 4, 44)) == null);
const gop1 = try map.getOrPut(5);
try map.putNoClobber(gpa, 5, 55);
try testing.expect((try map.fetchPut(gpa, 5, 66)).?.value == 55);
try testing.expect((try map.fetchPut(gpa, 5, 55)).?.value == 66);
const gop1 = try map.getOrPut(gpa, 5);
try testing.expect(gop1.found_existing == true);
try testing.expect(gop1.value_ptr.* == 55);
try testing.expect(gop1.index == 4);
gop1.value_ptr.* = 77;
try testing.expect(map.getEntry(5).?.value_ptr.* == 77);
const gop2 = try map.getOrPut(99);
const gop2 = try map.getOrPut(gpa, 99);
try testing.expect(gop2.found_existing == false);
try testing.expect(gop2.index == 5);
gop2.value_ptr.* = 42;
try testing.expect(map.getEntry(99).?.value_ptr.* == 42);
const gop3 = try map.getOrPutValue(5, 5);
const gop3 = try map.getOrPutValue(gpa, 5, 5);
try testing.expect(gop3.value_ptr.* == 77);
const gop4 = try map.getOrPutValue(100, 41);
const gop4 = try map.getOrPutValue(gpa, 100, 41);
try testing.expect(gop4.value_ptr.* == 41);
try testing.expect(map.contains(2));
@@ -2234,7 +1760,7 @@ test "basic hash map usage" {
// Since we've used `swapRemove` above, the index of this entry should remain unchanged.
try testing.expect(map.getIndex(100).? == 1);
const gop5 = try map.getOrPut(5);
const gop5 = try map.getOrPut(gpa, 5);
try testing.expect(gop5.found_existing == true);
try testing.expect(gop5.value_ptr.* == 77);
try testing.expect(gop5.index == 4);
@@ -2247,7 +1773,7 @@ test "basic hash map usage" {
try testing.expect(map.orderedRemove(100) == false);
try testing.expect(map.getEntry(100) == null);
try testing.expect(map.get(100) == null);
const gop6 = try map.getOrPut(5);
const gop6 = try map.getOrPut(gpa, 5);
try testing.expect(gop6.found_existing == true);
try testing.expect(gop6.value_ptr.* == 77);
try testing.expect(gop6.index == 3);
@@ -2256,15 +1782,17 @@ test "basic hash map usage" {
}
test "iterator hash map" {
var reset_map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer reset_map.deinit();
const gpa = testing.allocator;
var reset_map: Auto(i32, i32) = .empty;
defer reset_map.deinit(gpa);
// test ensureTotalCapacity with a 0 parameter
try reset_map.ensureTotalCapacity(0);
try reset_map.ensureTotalCapacity(gpa, 0);
try reset_map.putNoClobber(0, 11);
try reset_map.putNoClobber(1, 22);
try reset_map.putNoClobber(2, 33);
try reset_map.putNoClobber(gpa, 0, 11);
try reset_map.putNoClobber(gpa, 1, 22);
try reset_map.putNoClobber(gpa, 2, 33);
const keys = [_]i32{
0, 2, 1,
@@ -2312,10 +1840,12 @@ test "iterator hash map" {
}
test "ensure capacity" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
try map.ensureTotalCapacity(20);
var map: Auto(i32, i32) = .empty;
defer map.deinit(gpa);
try map.ensureTotalCapacity(gpa, 20);
const initial_capacity = map.capacity();
try testing.expect(initial_capacity >= 20);
var i: i32 = 0;
@@ -2329,23 +1859,25 @@ test "ensure capacity" {
test "ensure capacity leak" {
try testing.checkAllAllocationFailures(std.testing.allocator, struct {
pub fn f(allocator: Allocator) !void {
var map = AutoArrayHashMap(i32, i32).init(allocator);
defer map.deinit();
var map: Auto(i32, i32) = .empty;
defer map.deinit(allocator);
var i: i32 = 0;
// put more than `linear_scan_max` in so index_header gets allocated.
while (i <= 20) : (i += 1) try map.put(i, i);
while (i <= 20) : (i += 1) try map.put(allocator, i, i);
}
}.f, .{});
}
test "big map" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
var map: Auto(i32, i32) = .empty;
defer map.deinit(gpa);
var i: i32 = 0;
while (i < 8) : (i += 1) {
try map.put(i, i + 10);
try map.put(gpa, i, i + 10);
}
i = 0;
@@ -2358,7 +1890,7 @@ test "big map" {
i = 4;
while (i < 12) : (i += 1) {
try map.put(i, i + 12);
try map.put(gpa, i, i + 12);
}
i = 0;
@@ -2393,17 +1925,19 @@ test "big map" {
}
test "clone" {
var original = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer original.deinit();
const gpa = testing.allocator;
var original: Auto(i32, i32) = .empty;
defer original.deinit(gpa);
// put more than `linear_scan_max` so we can test that the index header is properly cloned
var i: u8 = 0;
while (i < 10) : (i += 1) {
try original.putNoClobber(i, i * 10);
try original.putNoClobber(gpa, i, i * 10);
}
var copy = try original.clone();
defer copy.deinit();
var copy = try original.clone(gpa);
defer copy.deinit(gpa);
i = 0;
while (i < 10) : (i += 1) {
@@ -2419,16 +1953,18 @@ test "clone" {
}
test "shrink" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
var map: Auto(i32, i32) = .empty;
defer map.deinit(gpa);
// This test is more interesting if we insert enough entries to allocate the index header.
const num_entries = 200;
var i: i32 = 0;
while (i < num_entries) : (i += 1)
try testing.expect((try map.fetchPut(i, i * 10)) == null);
try testing.expect((try map.fetchPut(gpa, i, i * 10)) == null);
try testing.expect(map.unmanaged.index_header != null);
try testing.expect(map.index_header != null);
try testing.expect(map.count() == num_entries);
// Test `shrinkRetainingCapacity`.
@@ -2437,7 +1973,7 @@ test "shrink" {
try testing.expect(map.capacity() >= num_entries);
i = 0;
while (i < num_entries) : (i += 1) {
const gop = try map.getOrPut(i);
const gop = try map.getOrPut(gpa, i);
if (i < 17) {
try testing.expect(gop.found_existing == true);
try testing.expect(gop.value_ptr.* == i * 10);
@@ -2445,12 +1981,12 @@ test "shrink" {
}
// Test `shrinkAndFree`.
map.shrinkAndFree(15);
map.shrinkAndFree(gpa, 15);
try testing.expect(map.count() == 15);
try testing.expect(map.capacity() == 15);
i = 0;
while (i < num_entries) : (i += 1) {
const gop = try map.getOrPut(i);
const gop = try map.getOrPut(gpa, i);
if (i < 15) {
try testing.expect(gop.found_existing == true);
try testing.expect(gop.value_ptr.* == i * 10);
@@ -2459,15 +1995,17 @@ test "shrink" {
}
test "pop()" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
var map: Auto(i32, i32) = .empty;
defer map.deinit(gpa);
// Insert just enough entries so that the map expands. Afterwards,
// pop all entries out of the map.
var i: i32 = 0;
while (i < 9) : (i += 1) {
try testing.expect((try map.fetchPut(i, i)) == null);
try testing.expect((try map.fetchPut(gpa, i, i)) == null);
}
while (map.pop()) |pop| {
@@ -2479,31 +2017,33 @@ test "pop()" {
}
test "reIndex" {
var map = ArrayHashMap(i32, i32, AutoContext(i32), true).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
var map: Custom(i32, i32, AutoContext(i32), true) = .empty;
defer map.deinit(gpa);
// Populate via the API.
const num_indexed_entries = 200;
var i: i32 = 0;
while (i < num_indexed_entries) : (i += 1)
try testing.expect((try map.fetchPut(i, i * 10)) == null);
try testing.expect((try map.fetchPut(gpa, i, i * 10)) == null);
// Make sure we allocated an index header.
try testing.expect(map.unmanaged.index_header != null);
try testing.expect(map.index_header != null);
// Now write to the arrays directly.
const num_unindexed_entries = 20;
try map.unmanaged.entries.resize(std.testing.allocator, num_indexed_entries + num_unindexed_entries);
try map.entries.resize(std.testing.allocator, num_indexed_entries + num_unindexed_entries);
for (map.keys()[num_indexed_entries..], map.values()[num_indexed_entries..], num_indexed_entries..) |*key, *value, j| {
key.* = @intCast(j);
value.* = @intCast(j * 10);
}
// After reindexing, we should see everything.
try map.reIndex();
try map.reIndex(gpa);
i = 0;
while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) {
const gop = try map.getOrPut(i);
const gop = try map.getOrPut(gpa, i);
try testing.expect(gop.found_existing == true);
try testing.expect(gop.value_ptr.* == i * 10);
try testing.expect(gop.index == i);
@@ -2511,23 +2051,20 @@ test "reIndex" {
}
test "auto store_hash" {
const HasCheapEql = AutoArrayHashMap(i32, i32);
const HasExpensiveEql = AutoArrayHashMap([32]i32, i32);
const HasCheapEql = Auto(i32, i32);
const HasExpensiveEql = Auto([32]i32, i32);
try testing.expect(@FieldType(HasCheapEql.Data, "hash") == void);
try testing.expect(@FieldType(HasExpensiveEql.Data, "hash") != void);
const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32);
const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32);
try testing.expect(@FieldType(HasCheapEqlUn.Data, "hash") == void);
try testing.expect(@FieldType(HasExpensiveEqlUn.Data, "hash") != void);
}
test "sort" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
var map: Auto(i32, i32) = .empty;
defer map.deinit(gpa);
for ([_]i32{ 8, 3, 12, 10, 2, 4, 9, 5, 6, 13, 14, 15, 16, 1, 11, 17, 7 }) |x| {
try map.put(x, x * 3);
try map.put(gpa, x, x * 3);
}
const C = struct {
@@ -2549,15 +2086,17 @@ test "sort" {
}
test "0 sized key" {
var map = AutoArrayHashMap(u0, i32).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
var map: Auto(u0, i32) = .empty;
defer map.deinit(gpa);
try testing.expectEqual(map.get(0), null);
try map.put(0, 5);
try map.put(gpa, 0, 5);
try testing.expectEqual(map.get(0), 5);
try map.put(0, 10);
try map.put(gpa, 0, 10);
try testing.expectEqual(map.get(0), 10);
try testing.expectEqual(map.swapRemove(0), true);
@@ -2565,12 +2104,14 @@ test "0 sized key" {
}
test "0 sized key and 0 sized value" {
var map = AutoArrayHashMap(u0, u0).init(std.testing.allocator);
defer map.deinit();
const gpa = testing.allocator;
var map: Auto(u0, u0) = .empty;
defer map.deinit(gpa);
try testing.expectEqual(map.get(0), null);
try map.put(0, 0);
try map.put(gpa, 0, 0);
try testing.expectEqual(map.get(0), 0);
try testing.expectEqual(map.swapRemove(0), true);
@@ -2580,7 +2121,7 @@ test "0 sized key and 0 sized value" {
test "setKey storehash true" {
const gpa = std.testing.allocator;
var map: ArrayHashMapUnmanaged(i32, i32, AutoContext(i32), true) = .empty;
var map: ArrayHashMap(i32, i32, AutoContext(i32), true) = .empty;
defer map.deinit(gpa);
try map.put(gpa, 12, 34);
@@ -2596,7 +2137,7 @@ test "setKey storehash true" {
test "setKey storehash false" {
const gpa = std.testing.allocator;
var map: ArrayHashMapUnmanaged(i32, i32, AutoContext(i32), false) = .empty;
var map: ArrayHashMap(i32, i32, AutoContext(i32), false) = .empty;
defer map.deinit(gpa);
try map.put(gpa, 12, 34);
@@ -2691,7 +2232,7 @@ pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime str
test "orderedRemoveAtMany" {
const gpa = testing.allocator;
var map: AutoArrayHashMapUnmanaged(usize, void) = .empty;
var map: Auto(usize, void) = .empty;
defer map.deinit(gpa);
for (0..10) |n| {
+3 -3
View File
@@ -770,9 +770,9 @@ fn testBasicWriteStream(w: *Stringify) !void {
}
fn getJsonObject(allocator: std.mem.Allocator) !std.json.Value {
var v: std.json.Value = .{ .object = std.json.ObjectMap.init(allocator) };
try v.object.put("one", std.json.Value{ .integer = @as(i64, @intCast(1)) });
try v.object.put("two", std.json.Value{ .float = 2.0 });
var v: std.json.Value = .{ .object = .empty };
try v.object.put(allocator, "one", std.json.Value{ .integer = @as(i64, @intCast(1)) });
try v.object.put(allocator, "two", std.json.Value{ .float = 2.0 });
return v;
}
+4 -4
View File
@@ -1,7 +1,7 @@
const std = @import("std");
const debug = std.debug;
const ArenaAllocator = std.heap.ArenaAllocator;
const StringArrayHashMap = std.StringArrayHashMap;
const StringArrayHashMap = std.array_hash_map.String;
const Allocator = std.mem.Allocator;
const json = std.json;
@@ -103,10 +103,10 @@ pub const Value = union(enum) {
.object_begin => {
switch (try source.nextAllocMax(allocator, .alloc_always, options.max_value_len.?)) {
.object_end => return try handleCompleteValue(&stack, allocator, source, Value{ .object = ObjectMap.init(allocator) }, options) orelse continue,
.object_end => return try handleCompleteValue(&stack, allocator, source, Value{ .object = .empty }, options) orelse continue,
.allocated_string => |key| {
try stack.appendSlice(&[_]Value{
Value{ .object = ObjectMap.init(allocator) },
Value{ .object = .empty },
Value{ .string = key },
});
},
@@ -145,7 +145,7 @@ fn handleCompleteValue(stack: *Array, allocator: Allocator, source: anytype, val
// stack: [..., .object]
var object = &stack.items[stack.items.len - 1].object;
const gop = try object.getOrPut(key);
const gop = try object.getOrPut(allocator, key);
if (gop.found_existing) {
switch (options.duplicate_field_behavior) {
.use_first => {},
+4 -3
View File
@@ -220,14 +220,15 @@ test "Value with duplicate fields" {
}
test "Value.jsonStringify" {
const gpa = testing.allocator;
var vals = [_]Value{
.{ .integer = 1 },
.{ .integer = 2 },
.{ .number_string = "3" },
};
var obj = ObjectMap.init(testing.allocator);
defer obj.deinit();
try obj.putNoClobber("a", .{ .string = "b" });
var obj: ObjectMap = .empty;
defer obj.deinit(gpa);
try obj.putNoClobber(gpa, "a", .{ .string = "b" });
const array = [_]Value{
.null,
.{ .bool = true },
+7 -6
View File
@@ -1,7 +1,3 @@
pub const ArrayHashMap = array_hash_map.ArrayHashMap;
pub const ArrayHashMapUnmanaged = array_hash_map.ArrayHashMapUnmanaged;
pub const AutoArrayHashMap = array_hash_map.AutoArrayHashMap;
pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap;
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BitStack = @import("BitStack.zig");
@@ -31,14 +27,19 @@ pub const SinglyLinkedList = @import("SinglyLinkedList.zig");
pub const StaticBitSet = bit_set.StaticBitSet;
pub const StringHashMap = hash_map.StringHashMap;
pub const StringHashMapUnmanaged = hash_map.StringHashMapUnmanaged;
pub const StringArrayHashMap = array_hash_map.StringArrayHashMap;
pub const StringArrayHashMapUnmanaged = array_hash_map.StringArrayHashMapUnmanaged;
pub const Target = @import("Target.zig");
pub const Thread = @import("Thread.zig");
pub const Treap = @import("treap.zig").Treap;
pub const Tz = tz.Tz;
pub const Uri = @import("Uri.zig");
/// Deprecated; use `array_hash_map.Custom`.
pub const ArrayHashMapUnmanaged = array_hash_map.Custom;
/// Deprecated; use `array_hash_map.Auto`.
pub const AutoArrayHashMapUnmanaged = array_hash_map.Auto;
/// Deprecated; use `array_hash_map.String`.
pub const StringArrayHashMapUnmanaged = array_hash_map.String;
/// A contiguous, growable list of items in memory. This is a wrapper around a
/// slice of `T` values.
///
+3 -3
View File
@@ -1779,8 +1779,8 @@ fn structInitExpr(
var sfba = std.heap.stackFallback(256, astgen.arena);
const sfba_allocator = sfba.get();
var duplicate_names = std.AutoArrayHashMap(Zir.NullTerminatedString, ArrayList(Ast.TokenIndex)).init(sfba_allocator);
try duplicate_names.ensureTotalCapacity(@intCast(struct_init.ast.fields.len));
var duplicate_names: std.array_hash_map.Auto(Zir.NullTerminatedString, ArrayList(Ast.TokenIndex)) = .empty;
try duplicate_names.ensureTotalCapacity(sfba_allocator, @intCast(struct_init.ast.fields.len));
// When there aren't errors, use this to avoid a second iteration.
var any_duplicate = false;
@@ -1789,7 +1789,7 @@ fn structInitExpr(
const name_token = tree.firstToken(field) - 2;
const name_index = try astgen.identAsString(name_token);
const gop = try duplicate_names.getOrPut(name_index);
const gop = try duplicate_names.getOrPut(sfba_allocator, name_index);
if (gop.found_existing) {
try gop.value_ptr.append(sfba_allocator, name_token);
+3 -3
View File
@@ -10552,7 +10552,7 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
count: usize = 0,
bytes: usize = 0,
};
var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena);
var counts: std.array_hash_map.Auto(Tag, TagStats) = .empty;
for (ip.locals) |*local| {
// Early check for length 0, because `view()` is invalid if capacity is 0
if (local.mutate.items.len == 0) continue;
@@ -10563,7 +10563,7 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
items.items(.tag)[0..local.mutate.items.len],
items.items(.data)[0..local.mutate.items.len],
) |tag, data| {
const gop = try counts.getOrPut(tag);
const gop = try counts.getOrPut(arena, tag);
if (!gop.found_existing) gop.value_ptr.* = .{};
gop.value_ptr.count += 1;
gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) {
@@ -10799,7 +10799,7 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
}
}
const SortContext = struct {
map: *std.AutoArrayHashMap(Tag, TagStats),
map: *std.array_hash_map.Auto(Tag, TagStats),
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
const values = ctx.map.values();
return values[a_index].bytes > values[b_index].bytes;
+3 -3
View File
@@ -795,7 +795,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
//
// If we don't handle this, we end up writing the default `lgammal` symbol for version 2.33
// twice, which causes a "duplicate symbol" assembler error.
var versions_written = std.AutoArrayHashMap(Version, void).init(arena);
var versions_written: std.array_hash_map.Auto(Version, void) = .empty;
var inc_reader: Io.Reader = .fixed(metadata.inclusions);
@@ -859,7 +859,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
}
versions_written.clearRetainingCapacity();
try versions_written.ensureTotalCapacity(versions_len);
try versions_written.ensureTotalCapacity(arena, versions_len);
{
var ver_buf_i: u8 = 0;
@@ -1035,7 +1035,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
}
versions_written.clearRetainingCapacity();
try versions_written.ensureTotalCapacity(versions_len);
try versions_written.ensureTotalCapacity(arena, versions_len);
{
var ver_buf_i: u8 = 0;
+3 -3
View File
@@ -36,14 +36,14 @@ pub fn writeCoffArchive(
var long_names: StringTable = .{};
defer long_names.deinit(allocator);
var symbol_to_member_index = std.StringArrayHashMap(usize).init(allocator);
defer symbol_to_member_index.deinit();
var symbol_to_member_index: std.array_hash_map.String(usize) = .empty;
defer symbol_to_member_index.deinit(allocator);
var string_table_len: usize = 0;
var num_symbols: usize = 0;
for (members.list.items, 0..) |member, i| {
for (member.symbol_names_for_import_lib) |symbol_name| {
const gop_result = try symbol_to_member_index.getOrPut(symbol_name);
const gop_result = try symbol_to_member_index.getOrPut(allocator, symbol_name);
// When building the symbol map, ignore duplicate symbol names.
// This can happen in cases like (using .def file syntax):
// _foo
+6 -6
View File
@@ -90,10 +90,10 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
// Even a .s file can substitute for a .c file.
const target = comp.getTarget();
const arch_name = std.zig.target.muslArchName(target.cpu.arch, target.abi);
var source_table = std.StringArrayHashMap(Ext).init(comp.gpa);
defer source_table.deinit();
var source_table: std.array_hash_map.String(Ext) = .empty;
defer source_table.deinit(gpa);
try source_table.ensureTotalCapacity(compat_time32_files.len + src_files.len);
try source_table.ensureTotalCapacity(gpa, compat_time32_files.len + src_files.len);
for (src_files) |src_file| {
try addSrcFile(arena, &source_table, src_file);
@@ -107,10 +107,10 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
}
}
var c_source_files = std.array_list.Managed(Compilation.CSourceFile).init(comp.gpa);
var c_source_files = std.array_list.Managed(Compilation.CSourceFile).init(gpa);
defer c_source_files.deinit();
var override_path = std.array_list.Managed(u8).init(comp.gpa);
var override_path = std.array_list.Managed(u8).init(gpa);
defer override_path.deinit();
const s = path.sep_str;
@@ -349,7 +349,7 @@ const Ext = enum {
o3,
};
fn addSrcFile(arena: Allocator, source_table: *std.StringArrayHashMap(Ext), file_path: []const u8) !void {
fn addSrcFile(arena: Allocator, source_table: *std.array_hash_map.String(Ext), file_path: []const u8) !void {
const ext: Ext = ext: {
if (mem.endsWith(u8, file_path, ".c")) {
if (mem.startsWith(u8, file_path, "musl/src/string/") or
+8 -8
View File
@@ -881,10 +881,10 @@ fn flushInner(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id) !void {
self.rela_plt.clearRetainingCapacity();
if (self.zigObjectPtr()) |zo| {
var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
var undefs: std.array_hash_map.Auto(SymbolResolver.Index, std.array_list.Managed(Ref)) = .empty;
defer {
for (undefs.values()) |*refs| refs.deinit();
undefs.deinit();
undefs.deinit(gpa);
}
var has_reloc_errors = false;
@@ -1332,10 +1332,10 @@ fn scanRelocs(self: *Elf) !void {
const gpa = self.base.comp.gpa;
const shared_objects = self.shared_objects.values();
var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
var undefs: std.array_hash_map.Auto(SymbolResolver.Index, std.array_list.Managed(Ref)) = .empty;
defer {
for (undefs.values()) |*refs| refs.deinit();
undefs.deinit();
undefs.deinit(gpa);
}
var has_reloc_errors = false;
@@ -1748,12 +1748,12 @@ pub fn deleteExport(
fn checkDuplicates(self: *Elf) !void {
const gpa = self.base.comp.gpa;
var dupes = std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(File.Index)).init(gpa);
var dupes: std.array_hash_map.Auto(SymbolResolver.Index, std.ArrayList(File.Index)) = .empty;
defer {
for (dupes.values()) |*list| {
list.deinit(gpa);
}
dupes.deinit();
dupes.deinit(gpa);
}
if (self.zigObjectPtr()) |zig_object| {
@@ -2992,10 +2992,10 @@ fn allocateSpecialPhdrs(self: *Elf) void {
fn writeAtoms(self: *Elf) !void {
const gpa = self.base.comp.gpa;
var undefs: std.AutoArrayHashMap(SymbolResolver.Index, std.array_list.Managed(Ref)) = .init(gpa);
var undefs: std.array_hash_map.Auto(SymbolResolver.Index, std.array_list.Managed(Ref)) = .empty;
defer {
for (undefs.values()) |*refs| refs.deinit();
undefs.deinit();
undefs.deinit(gpa);
}
var buffer: std.Io.Writer.Allocating = .init(gpa);
+1 -1
View File
@@ -605,7 +605,7 @@ fn reportUndefined(
.object => |x| x.symbols_resolver.items[rel.r_sym() - x.first_global.?],
inline else => |x| x.symbols_resolver.items[rel.r_sym()],
};
const gop = try undefs.getOrPut(idx);
const gop = try undefs.getOrPut(gpa, idx);
if (!gop.found_existing) {
gop.value_ptr.* = std.array_list.Managed(Elf.Ref).init(gpa);
}
+4 -2
View File
@@ -753,6 +753,8 @@ pub fn markImportsExports(self: *Object, elf_file: *Elf) void {
}
pub fn checkDuplicates(self: *Object, dupes: anytype, elf_file: *Elf) error{OutOfMemory}!void {
const gpa = elf_file.base.comp.gpa;
const first_global = self.first_global orelse return;
for (0..self.globals().len) |i| {
const esym_idx = first_global + i;
@@ -772,11 +774,11 @@ pub fn checkDuplicates(self: *Object, dupes: anytype, elf_file: *Elf) error{OutO
if (!atom_ptr.alive) continue;
}
const gop = try dupes.getOrPut(self.symbols_resolver.items[i]);
const gop = try dupes.getOrPut(gpa, self.symbols_resolver.items[i]);
if (!gop.found_existing) {
gop.value_ptr.* = .empty;
}
try gop.value_ptr.append(elf_file.base.comp.gpa, self.index);
try gop.value_ptr.append(gpa, self.index);
}
}
+3 -1
View File
@@ -710,6 +710,8 @@ pub fn markImportsExports(self: *ZigObject, elf_file: *Elf) void {
}
pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{OutOfMemory}!void {
const gpa = elf_file.base.comp.gpa;
for (self.global_symbols.items, 0..) |index, i| {
const esym = self.symtab.items(.elf_sym)[index];
const shndx = self.symtab.items(.shndx)[index];
@@ -727,7 +729,7 @@ pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{O
if (!atom_ptr.alive) continue;
}
const gop = try dupes.getOrPut(self.symbols_resolver.items[i]);
const gop = try dupes.getOrPut(gpa, self.symbols_resolver.items[i]);
if (!gop.found_existing) {
gop.value_ptr.* = .empty;
}
+6 -6
View File
@@ -164,8 +164,8 @@ pub fn resolveBoundarySymbols(self: *InternalObject, macho_file: *MachO) !void {
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
var boundary_symbols = std.StringArrayHashMap(MachO.Ref).init(gpa);
defer boundary_symbols.deinit();
var boundary_symbols: std.array_hash_map.String(MachO.Ref) = .empty;
defer boundary_symbols.deinit(gpa);
for (macho_file.objects.items) |index| {
const object = macho_file.getFile(index).?.object;
@@ -180,7 +180,7 @@ pub fn resolveBoundarySymbols(self: *InternalObject, macho_file: *MachO) !void {
mem.startsWith(u8, name, "section$start$") or
mem.startsWith(u8, name, "section$end$"))
{
const gop = try boundary_symbols.getOrPut(name);
const gop = try boundary_symbols.getOrPut(gpa, name);
if (!gop.found_existing) {
gop.value_ptr.* = .{ .index = @intCast(i), .file = index };
}
@@ -344,8 +344,8 @@ pub fn resolveObjcMsgSendSymbols(self: *InternalObject, macho_file: *MachO) !voi
const gpa = macho_file.base.comp.gpa;
var objc_msgsend_syms = std.StringArrayHashMap(MachO.Ref).init(gpa);
defer objc_msgsend_syms.deinit();
var objc_msgsend_syms: std.array_hash_map.String(MachO.Ref) = .empty;
defer objc_msgsend_syms.deinit(gpa);
for (macho_file.objects.items) |index| {
const object = macho_file.getFile(index).?.object;
@@ -360,7 +360,7 @@ pub fn resolveObjcMsgSendSymbols(self: *InternalObject, macho_file: *MachO) !voi
const name = sym.getName(macho_file);
if (mem.startsWith(u8, name, "_objc_msgSend$")) {
const gop = try objc_msgsend_syms.getOrPut(name);
const gop = try objc_msgsend_syms.getOrPut(gpa, name);
if (!gop.found_existing) {
gop.value_ptr.* = .{ .index = @intCast(i), .file = index };
}
+5 -5
View File
@@ -1292,8 +1292,8 @@ fn parseUnwindRecords(self: *Object, allocator: Allocator, cpu_arch: std.Target.
const Superposition = struct { atom: Atom.Index, size: u64, cu: ?UnwindInfo.Record.Index = null, fde: ?Fde.Index = null };
var superposition = std.AutoArrayHashMap(u64, Superposition).init(allocator);
defer superposition.deinit();
var superposition: std.array_hash_map.Auto(u64, Superposition) = .empty;
defer superposition.deinit(allocator);
const slice = self.symtab.slice();
for (slice.items(.nlist), slice.items(.atom), slice.items(.size)) |nlist, atom, size| {
@@ -1301,7 +1301,7 @@ fn parseUnwindRecords(self: *Object, allocator: Allocator, cpu_arch: std.Target.
if (nlist.n_type.bits.type != .sect) continue;
const sect = self.sections.items(.header)[nlist.n_sect - 1];
if (sect.isCode() and sect.size > 0) {
try superposition.ensureUnusedCapacity(1);
try superposition.ensureUnusedCapacity(allocator, 1);
const gop = superposition.getOrPutAssumeCapacity(nlist.n_value);
if (gop.found_existing) {
assert(gop.value_ptr.atom == atom and gop.value_ptr.size == size);
@@ -1315,7 +1315,7 @@ fn parseUnwindRecords(self: *Object, allocator: Allocator, cpu_arch: std.Target.
const atom = rec.getAtom(macho_file);
const addr = atom.getInputAddress(macho_file) + rec.atom_offset;
try superposition.ensureUnusedCapacity(1);
try superposition.ensureUnusedCapacity(allocator, 1);
const gop = superposition.getOrPutAssumeCapacity(addr);
if (!gop.found_existing) {
gop.value_ptr.* = .{ .atom = rec.atom, .size = rec.length };
@@ -1331,7 +1331,7 @@ fn parseUnwindRecords(self: *Object, allocator: Allocator, cpu_arch: std.Target.
const atom = fde.getAtom(macho_file);
const addr = atom.getInputAddress(macho_file) + fde.atom_offset;
try superposition.ensureUnusedCapacity(1);
try superposition.ensureUnusedCapacity(allocator, 1);
const gop = superposition.getOrPutAssumeCapacity(addr);
if (!gop.found_existing) {
gop.value_ptr.* = .{ .atom = fde.atom, .size = fde.pc_range };
+4 -4
View File
@@ -173,18 +173,18 @@ pub fn generate(info: *UnwindInfo, macho_file: *MachO) !void {
}
};
var common_encodings_counts = std.ArrayHashMap(
var common_encodings_counts: std.array_hash_map.Custom(
Encoding,
CommonEncWithCount,
Context,
false,
).init(gpa);
defer common_encodings_counts.deinit();
) = .empty;
defer common_encodings_counts.deinit(gpa);
for (info.records.items) |ref| {
const rec = ref.getUnwindRecord(macho_file);
if (rec.enc.isDwarf(macho_file)) continue;
const gop = try common_encodings_counts.getOrPut(rec.enc);
const gop = try common_encodings_counts.getOrPut(gpa, rec.enc);
if (!gop.found_existing) {
gop.value_ptr.* = .{
.enc = rec.enc,
+28 -27
View File
@@ -67,17 +67,17 @@ const ModuleInfo = struct {
parser: *BinaryModule.Parser,
binary: BinaryModule,
) BinaryModule.ParseError!ModuleInfo {
var entry_points = std.AutoArrayHashMap(ResultId, void).init(arena);
var functions = std.AutoArrayHashMap(ResultId, Fn).init(arena);
var entry_points: std.array_hash_map.Auto(ResultId, void) = .empty;
var functions: std.array_hash_map.Auto(ResultId, Fn) = .empty;
var fn_types = std.AutoHashMap(ResultId, struct {
return_type: ResultId,
param_types: []const ResultId,
}).init(arena);
var calls = std.AutoArrayHashMap(ResultId, void).init(arena);
var calls: std.array_hash_map.Auto(ResultId, void) = .empty;
var callee_store = std.array_list.Managed(ResultId).init(arena);
var function_invocation_globals = std.AutoArrayHashMap(ResultId, void).init(arena);
var function_invocation_globals: std.array_hash_map.Auto(ResultId, void) = .empty;
var result_id_offsets = std.array_list.Managed(u16).init(arena);
var invocation_globals = std.AutoArrayHashMap(ResultId, InvocationGlobal).init(arena);
var invocation_globals: std.array_hash_map.Auto(ResultId, InvocationGlobal) = .empty;
var maybe_current_function: ?ResultId = null;
var fn_ty_id: ResultId = undefined;
@@ -90,7 +90,7 @@ const ModuleInfo = struct {
switch (inst.opcode) {
.OpEntryPoint => {
const entry_point: ResultId = @enumFromInt(inst.operands[1]);
const entry = try entry_points.getOrPut(entry_point);
const entry = try entry_points.getOrPut(arena, entry_point);
if (entry.found_existing) {
log.err("Entry point type {f} has duplicate definition", .{entry_point});
return error.DuplicateId;
@@ -126,7 +126,7 @@ const ModuleInfo = struct {
else
.none;
try invocation_globals.put(result_id, .{
try invocation_globals.put(arena, result_id, .{
.dependencies = .{},
.ty = global_type,
.initializer = initializer,
@@ -145,14 +145,14 @@ const ModuleInfo = struct {
},
.OpFunctionCall => {
const callee: ResultId = @enumFromInt(inst.operands[2]);
try calls.put(callee, {});
try calls.put(arena, callee, {});
},
.OpFunctionEnd => {
const current_function = maybe_current_function orelse {
log.err("encountered OpFunctionEnd without corresponding OpFunction", .{});
return error.InvalidPhysicalFormat;
};
const entry = try functions.getOrPut(current_function);
const entry = try functions.getOrPut(arena, current_function);
if (entry.found_existing) {
log.err("Function {f} has duplicate definition", .{current_function});
return error.DuplicateId;
@@ -170,7 +170,7 @@ const ModuleInfo = struct {
.first_callee = first_callee,
.return_type = fn_type.return_type,
.param_types = fn_type.param_types,
.invocation_globals = try function_invocation_globals.unmanaged.clone(arena),
.invocation_globals = try function_invocation_globals.clone(arena),
};
maybe_current_function = null;
calls.clearRetainingCapacity();
@@ -181,7 +181,7 @@ const ModuleInfo = struct {
for (result_id_offsets.items) |off| {
const result_id: ResultId = @enumFromInt(inst.operands[off]);
if (invocation_globals.contains(result_id)) {
try function_invocation_globals.put(result_id, {});
try function_invocation_globals.put(arena, result_id, {});
}
}
}
@@ -191,11 +191,11 @@ const ModuleInfo = struct {
return error.InvalidPhysicalFormat;
}
return ModuleInfo{
.functions = functions.unmanaged,
.entry_points = entry_points.unmanaged,
return .{
.functions = functions,
.entry_points = entry_points,
.callee_store = callee_store.items,
.invocation_globals = invocation_globals.unmanaged,
.invocation_globals = invocation_globals,
};
}
@@ -583,7 +583,8 @@ const ModuleBuilder = struct {
}
fn emitNewEntryPoints(self: *ModuleBuilder, info: ModuleInfo) !void {
var all_function_invocation_globals = std.AutoArrayHashMap(ResultId, void).init(self.arena);
const arena = self.arena;
var all_function_invocation_globals: std.array_hash_map.Auto(ResultId, void) = .empty;
for (info.entry_points.keys(), 0..) |func, entry_point_index| {
const fn_info = info.functions.get(func).?;
@@ -593,7 +594,7 @@ const ModuleBuilder = struct {
.param_types = fn_info.param_types,
}).?;
try self.section.emit(self.arena, .OpFunction, .{
try self.section.emit(arena, .OpFunction, .{
.id_result_type = fn_info.return_type,
.id_result = ep_id,
.function_control = .{}, // TODO: Copy the attributes from the original function maybe?
@@ -604,13 +605,13 @@ const ModuleBuilder = struct {
const params_id_base: u32 = @intFromEnum(self.allocIds(@intCast(fn_info.param_types.len)));
for (fn_info.param_types, 0..) |param_type, i| {
const id: ResultId = @enumFromInt(params_id_base + @as(u32, @intCast(i)));
try self.section.emit(self.arena, .OpFunctionParameter, .{
try self.section.emit(arena, .OpFunctionParameter, .{
.id_result_type = param_type,
.id_result = id,
});
}
try self.section.emit(self.arena, .OpLabel, .{
try self.section.emit(arena, .OpLabel, .{
.id_result = self.allocId(),
});
@@ -619,10 +620,10 @@ const ModuleBuilder = struct {
// Just quickly construct that set here.
all_function_invocation_globals.clearRetainingCapacity();
for (fn_info.invocation_globals.keys()) |global| {
try all_function_invocation_globals.put(global, {});
try all_function_invocation_globals.put(arena, global, {});
const global_info = info.invocation_globals.get(global).?;
for (global_info.dependencies.keys()) |dependency| {
try all_function_invocation_globals.put(dependency, {});
try all_function_invocation_globals.put(arena, dependency, {});
}
}
@@ -632,7 +633,7 @@ const ModuleBuilder = struct {
const global_info = info.invocation_globals.get(global).?;
const id: ResultId = @enumFromInt(global_id_base + @as(u32, @intCast(i)));
try self.section.emit(self.arena, .OpVariable, .{
try self.section.emit(arena, .OpVariable, .{
.id_result_type = global_info.ty,
.id_result = id,
.storage_class = .function,
@@ -649,7 +650,7 @@ const ModuleBuilder = struct {
assert(initializer_info.param_types.len == 0);
try self.callWithGlobalsAndLinearParams(
all_function_invocation_globals,
&all_function_invocation_globals,
global_info.initializer,
initializer_info,
global_id_base,
@@ -659,21 +660,21 @@ const ModuleBuilder = struct {
// Call the main kernel entry
try self.callWithGlobalsAndLinearParams(
all_function_invocation_globals,
&all_function_invocation_globals,
func,
fn_info,
global_id_base,
params_id_base,
);
try self.section.emit(self.arena, .OpReturn, {});
try self.section.emit(self.arena, .OpFunctionEnd, {});
try self.section.emit(arena, .OpReturn, {});
try self.section.emit(arena, .OpFunctionEnd, {});
}
}
fn callWithGlobalsAndLinearParams(
self: *ModuleBuilder,
all_globals: std.AutoArrayHashMap(ResultId, void),
all_globals: *const std.array_hash_map.Auto(ResultId, void),
func: ResultId,
callee_info: ModuleInfo.Fn,
global_id_base: u32,
+3 -3
View File
@@ -3088,11 +3088,11 @@ fn parseArchive(wasm: *Wasm, obj: link.Input.Object) !void {
// In this case we must force link all embedded object files within the archive
// We loop over all symbols, and then group them by offset as the offset
// notates where the object file starts.
var offsets = std.AutoArrayHashMap(u32, void).init(gpa);
defer offsets.deinit();
var offsets: std.array_hash_map.Auto(u32, void) = .empty;
defer offsets.deinit(gpa);
for (archive.toc.values()) |symbol_offsets| {
for (symbol_offsets.items) |sym_offset| {
try offsets.put(sym_offset, {});
try offsets.put(gpa, sym_offset, {});
}
}
+11 -11
View File
@@ -44,7 +44,7 @@ const StringPairContext = struct {
}
};
const OperandKindMap = std.ArrayHashMap(StringPair, OperandKind, StringPairContext, true);
const OperandKindMap = std.array_hash_map.Custom(StringPair, OperandKind, StringPairContext, true);
/// Khronos made it so that these names are not defined explicitly, so
/// we need to hardcode it (like they did).
@@ -295,9 +295,9 @@ fn render(
);
// Merge the operand kinds from all extensions together.
var all_operand_kinds = OperandKindMap.init(arena);
var all_operand_kinds: OperandKindMap = .empty;
for (registry.operand_kinds) |kind| {
try all_operand_kinds.putNoClobber(.{ "core", kind.kind }, kind);
try all_operand_kinds.putNoClobber(arena, .{ "core", kind.kind }, kind);
}
for (extensions) |ext| {
// Note: extensions may define the same operand kind, with different
@@ -305,11 +305,11 @@ fn render(
// using the name of the extension. This is similar to what
// the official headers do.
try all_operand_kinds.ensureUnusedCapacity(ext.spec.operand_kinds.len);
try all_operand_kinds.ensureUnusedCapacity(arena, ext.spec.operand_kinds.len);
for (ext.spec.operand_kinds) |kind| {
var new_kind = kind;
new_kind.kind = try std.mem.join(arena, ".", &.{ ext.name, kind.kind });
try all_operand_kinds.putNoClobber(.{ ext.name, kind.kind }, new_kind);
try all_operand_kinds.putNoClobber(arena, .{ ext.name, kind.kind }, new_kind);
}
}
@@ -411,11 +411,11 @@ fn renderInstructionsCase(
}
fn renderClass(arena: Allocator, writer: *std.Io.Writer, instructions: []const Instruction) !void {
var class_map = std.StringArrayHashMap(void).init(arena);
var class_map: std.array_hash_map.String(void) = .empty;
for (instructions) |inst| {
if (std.mem.eql(u8, inst.class.?, "@exclude")) continue;
try class_map.put(inst.class.?, {});
try class_map.put(arena, inst.class.?, {});
}
try writer.writeAll("pub const Class = enum {\n");
@@ -538,8 +538,8 @@ fn renderOpcodes(
instructions: []const Instruction,
extended_structs: ExtendedStructSet,
) !void {
var inst_map = std.AutoArrayHashMap(u32, usize).init(arena);
try inst_map.ensureTotalCapacity(instructions.len);
var inst_map: std.array_hash_map.Auto(u32, usize) = .empty;
try inst_map.ensureTotalCapacity(arena, instructions.len);
var aliases = std.array_list.Managed(struct { inst: usize, alias: usize }).init(arena);
try aliases.ensureTotalCapacity(instructions.len);
@@ -653,8 +653,8 @@ fn renderValueEnum(
) !void {
const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
var enum_map = std.AutoArrayHashMap(u32, usize).init(arena);
try enum_map.ensureTotalCapacity(enumerants.len);
var enum_map: std.array_hash_map.Auto(u32, usize) = .empty;
try enum_map.ensureTotalCapacity(arena, enumerants.len);
var aliases = std.array_list.Managed(struct { enumerant: usize, alias: usize }).init(arena);
try aliases.ensureTotalCapacity(enumerants.len);
+10 -14
View File
@@ -274,8 +274,8 @@ const MultiSym = struct {
const Parse = struct {
arena: mem.Allocator,
sym_table: *std.StringArrayHashMap(MultiSym),
sections: *std.StringArrayHashMap(void),
sym_table: *std.array_hash_map.String(MultiSym),
sections: *std.array_hash_map.String(void),
elf_bytes: []align(@alignOf(elf.Elf64_Ehdr)) u8,
header: elf.Header,
arch: Arch,
@@ -289,13 +289,11 @@ pub fn main(init: std.process.Init) !void {
var build_all_dir = try Io.Dir.cwd().openDir(io, build_all_path, .{});
var sym_table = std.StringArrayHashMap(MultiSym).init(arena);
var sections = std.StringArrayHashMap(void).init(arena);
var sym_table: std.array_hash_map.String(MultiSym) = .empty;
var sections: std.array_hash_map.String(void) = .empty;
for (arches) |arch| {
const libc_so_path = try std.fmt.allocPrint(arena, "{s}/lib/libc.so", .{
@tagName(arch),
});
const libc_so_path = try std.fmt.allocPrint(arena, "{t}/lib/libc.so", .{arch});
// Read the ELF header.
const elf_bytes = build_all_dir.readFileAllocOptions(
@@ -306,9 +304,7 @@ pub fn main(init: std.process.Init) !void {
.of(elf.Elf64_Ehdr),
null,
) catch |err| {
std.debug.panic("unable to read '{s}/{s}': {s}", .{
build_all_path, libc_so_path, @errorName(err),
});
std.debug.panic("unable to read '{s}/{s}': {t}", .{ build_all_path, libc_so_path, err });
};
var stream: std.Io.Reader = .fixed(elf_bytes);
const header = try elf.Header.read(&stream);
@@ -359,8 +355,8 @@ pub fn main(init: std.process.Init) !void {
// Sort the symbols for deterministic output and cleaner vcs diffs.
const SymTableSort = struct {
sections: *const std.StringArrayHashMap(void),
sym_table: *const std.StringArrayHashMap(MultiSym),
sections: *const std.array_hash_map.String(void),
sym_table: *const std.array_hash_map.String(MultiSym),
/// Sort first by section name, then by symbol name
pub fn lessThan(ctx: @This(), index_a: usize, index_b: usize) bool {
@@ -580,7 +576,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: std.builtin.End
if (mem.eql(u8, sh_name, ".dynsym")) {
dynsym_index = @as(u16, @intCast(i));
}
const gop = try parse.sections.getOrPut(sh_name);
const gop = try parse.sections.getOrPut(arena, sh_name);
section_index_map[i] = @as(u16, @intCast(gop.index));
}
if (dynsym_index == 0) @panic("did not find the .dynsym section");
@@ -653,7 +649,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: std.builtin.End
},
}
const gop = try parse.sym_table.getOrPut(name);
const gop = try parse.sym_table.getOrPut(arena, name);
if (gop.found_existing) {
if (gop.value_ptr.section != section_index_map[this_section]) {
const sh_name = mem.sliceTo(shstrtab[s(shdrs[this_section].sh_name)..], 0);
+4 -4
View File
@@ -130,7 +130,7 @@ const Contents = struct {
};
const HashToContents = std.StringHashMap(Contents);
const TargetToHash = std.StringArrayHashMap([]const u8);
const TargetToHash = std.array_hash_map.String([]const u8);
const PathTable = std.StringHashMap(*TargetToHash);
const LibCVendor = enum {
@@ -317,7 +317,7 @@ pub fn main(init: std.process.Init) !void {
const path_gop = try path_table.getOrPut(rel_path);
const target_to_hash = if (path_gop.found_existing) path_gop.value_ptr.* else blk: {
const ptr = try arena.create(TargetToHash);
ptr.* = TargetToHash.init(arena);
ptr.* = .empty;
path_gop.value_ptr.* = ptr;
break :blk ptr;
};
@@ -327,14 +327,14 @@ pub fn main(init: std.process.Init) !void {
// such cases, we manually patch the affected header after processing, so it's fine that
// only one header wins here.
if (libc_target.dest != null) {
const hash_gop = try target_to_hash.getOrPut(dest_target);
const hash_gop = try target_to_hash.getOrPut(arena, dest_target);
if (hash_gop.found_existing) std.debug.print("overwrote: {s} {s} {s}\n", .{
libc_dir,
rel_path,
dest_target,
}) else hash_gop.value_ptr.* = hash;
} else {
try target_to_hash.putNoClobber(dest_target, hash);
try target_to_hash.putNoClobber(arena, dest_target, hash);
}
},
else => std.debug.print("warning: weird file: {s}\n", .{full_path}),
+3 -3
View File
@@ -138,7 +138,7 @@ const Contents = struct {
};
const HashToContents = std.StringHashMap(Contents);
const TargetToHash = std.ArrayHashMap(DestTarget, []const u8, DestTarget.HashContext, true);
const TargetToHash = std.array_hash_map.Custom(DestTarget, []const u8, DestTarget.HashContext, true);
const PathTable = std.StringHashMap(*TargetToHash);
pub fn main(init: std.process.Init) !void {
@@ -239,11 +239,11 @@ pub fn main(init: std.process.Init) !void {
const path_gop = try path_table.getOrPut(rel_path);
const target_to_hash = if (path_gop.found_existing) path_gop.value_ptr.* else blk: {
const ptr = try arena.create(TargetToHash);
ptr.* = TargetToHash.init(arena);
ptr.* = .empty;
path_gop.value_ptr.* = ptr;
break :blk ptr;
};
try target_to_hash.putNoClobber(dest_target, hash);
try target_to_hash.putNoClobber(arena, dest_target, hash);
},
else => std.debug.print("warning: weird file: {s}\n", .{full_path}),
}