Breaking hash map changes for 0.8.0

- hash/eql functions moved into a Context object (see the migration sketch after this list)
- *Context functions pass an explicit context
- *Adapted functions pass specialized keys and contexts
- new getPtr() function returns a pointer to value
- remove functions renamed to fetchRemove
- new remove functions return bool
- removeAssertDiscard deleted, use assert(remove(...)) instead
- Keys and values are stored in separate arrays
- Entry is now {*K, *V}, the new KV is {K, V}
- BufSet/BufMap functions renamed to match other set/map types
- fixed iterating-while-modifying bug in src/link/C.zig
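The sketch below is an editor's illustration, not part of the commit; it shows how these changes land at a typical call site, assuming the 0.8.0-dev std described here:

const std = @import("std");

test "0.8.0 hash map migration sketch" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var map = std.AutoArrayHashMap(u32, []const u8).init(&gpa.allocator);
    defer map.deinit();

    try map.put(1, "one");

    // new: getPtr() returns a pointer to the value
    if (map.getPtr(1)) |v| v.* = "uno";

    // new: keys and values are stored in separate arrays
    for (map.keys()) |k, i| {
        std.debug.print("{} = {s}\n", .{ k, map.values()[i] });
    }

    // new: Entry is {*K, *V}, so iteration goes through key_ptr/value_ptr
    var it = map.iterator();
    while (it.next()) |entry| {
        std.debug.print("{} = {s}\n", .{ entry.key_ptr.*, entry.value_ptr.* });
    }

    // new: fetchSwapRemove returns ?KV (a {K, V} copy); swapRemove returns bool
    if (map.fetchSwapRemove(1)) |kv| std.debug.assert(kv.key == 1);
    std.debug.assert(!map.swapRemove(1)); // already gone
}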
Martin Wickham
2021-06-03 15:39:26 -05:00
parent 87dae0ce98
commit fc9430f567
49 changed files with 3227 additions and 1545 deletions
doc/docgen.zig (+3 -3)
@@ -404,9 +404,9 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
.n = header_stack_size,
},
});
if (try urls.fetchPut(urlized, tag_token)) |entry| {
if (try urls.fetchPut(urlized, tag_token)) |kv| {
parseError(tokenizer, tag_token, "duplicate header url: #{s}", .{urlized}) catch {};
parseError(tokenizer, entry.value, "other tag here", .{}) catch {};
parseError(tokenizer, kv.value, "other tag here", .{}) catch {};
return error.ParseError;
}
if (last_action == Action.Open) {
@@ -1023,7 +1023,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: any
defer root_node.end();
var env_map = try process.getEnvMap(allocator);
try env_map.set("ZIG_DEBUG_COLOR", "1");
try env_map.put("ZIG_DEBUG_COLOR", "1");
const builtin_code = try getBuiltinCode(allocator, &env_map, zig_exe);
lib/std/array_hash_map.zig (+1296 -510)
@@ -17,23 +17,36 @@ const Allocator = mem.Allocator;
const builtin = std.builtin;
const hash_map = @This();
/// An ArrayHashMap with default hash and equal functions.
/// See AutoContext for a description of the hash and equal implementations.
pub fn AutoArrayHashMap(comptime K: type, comptime V: type) type {
return ArrayHashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), !autoEqlIsCheap(K));
return ArrayHashMap(K, V, AutoContext(K), !autoEqlIsCheap(K));
}
/// An ArrayHashMapUnmanaged with default hash and equal functions.
/// See AutoContext for a description of the hash and equal implementations.
pub fn AutoArrayHashMapUnmanaged(comptime K: type, comptime V: type) type {
return ArrayHashMapUnmanaged(K, V, getAutoHashFn(K), getAutoEqlFn(K), !autoEqlIsCheap(K));
return ArrayHashMapUnmanaged(K, V, AutoContext(K), !autoEqlIsCheap(K));
}
/// Builtin hashmap for strings as keys.
pub fn StringArrayHashMap(comptime V: type) type {
return ArrayHashMap([]const u8, V, hashString, eqlString, true);
return ArrayHashMap([]const u8, V, StringContext, true);
}
pub fn StringArrayHashMapUnmanaged(comptime V: type) type {
return ArrayHashMapUnmanaged([]const u8, V, hashString, eqlString, true);
return ArrayHashMapUnmanaged([]const u8, V, StringContext, true);
}
pub const StringContext = struct {
pub fn hash(self: @This(), s: []const u8) u32 {
return hashString(s);
}
pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
return eqlString(a, b);
}
};
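As an editor's aside (not part of the diff): any struct with these two methods can serve as a context. The case-insensitive variant below is a hypothetical sketch, assuming the std.ascii and std.hash helpers of the 0.8 std:

const CaseInsensitiveContext = struct {
    pub fn hash(self: @This(), s: []const u8) u32 {
        _ = self;
        var h = std.hash.Fnv1a_32.init();
        for (s) |c| h.update(&[_]u8{std.ascii.toLower(c)});
        return h.final();
    }
    pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
        _ = self;
        return std.ascii.eqlIgnoreCase(a, b);
    }
};
// usage: std.ArrayHashMap([]const u8, u32, CaseInsensitiveContext, true)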
pub fn eqlString(a: []const u8, b: []const u8) bool {
return mem.eql(u8, a, b);
}
@@ -54,83 +67,112 @@ pub fn hashString(s: []const u8) u32 {
/// but only has to call `eql` for hash collisions.
/// If typical operations (except iteration over entries) need to be faster, prefer
/// the alternative `std.HashMap`.
/// Context must be a struct type with two member functions:
/// hash(self, K) u32
/// eql(self, K, K) bool
/// Adapted variants of many functions are provided. These variants
/// take a pseudo key instead of a key. Their context must have the functions:
/// hash(self, PseudoKey) u32
/// eql(self, PseudoKey, K) bool
pub fn ArrayHashMap(
comptime K: type,
comptime V: type,
comptime hash: fn (key: K) u32,
comptime eql: fn (a: K, b: K) bool,
comptime Context: type,
comptime store_hash: bool,
) type {
comptime std.hash_map.verifyContext(Context, K, K, u32);
return struct {
unmanaged: Unmanaged,
allocator: *Allocator,
ctx: Context,
pub const Unmanaged = ArrayHashMapUnmanaged(K, V, hash, eql, store_hash);
/// The ArrayHashMapUnmanaged type using the same settings as this managed map.
pub const Unmanaged = ArrayHashMapUnmanaged(K, V, Context, store_hash);
/// Pointers to a key and value in the backing store of this map.
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
pub const Entry = Unmanaged.Entry;
/// A KV pair which has been copied out of the backing store
pub const KV = Unmanaged.KV;
/// The Data type used for the MultiArrayList backing this map
pub const Data = Unmanaged.Data;
/// The MultiArrayList type backing this map
pub const DataList = Unmanaged.DataList;
/// The stored hash type, either u32 or void.
pub const Hash = Unmanaged.Hash;
/// getOrPut variants return this structure, with pointers
/// to the backing store and a flag to indicate whether an
/// existing entry was found.
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
pub const GetOrPutResult = Unmanaged.GetOrPutResult;
/// Deprecated. Iterate using `items`.
pub const Iterator = struct {
hm: *const Self,
/// Iterator through the entry array.
index: usize,
pub fn next(it: *Iterator) ?*Entry {
if (it.index >= it.hm.unmanaged.entries.items.len) return null;
const result = &it.hm.unmanaged.entries.items[it.index];
it.index += 1;
return result;
}
/// Reset the iterator to the initial index
pub fn reset(it: *Iterator) void {
it.index = 0;
}
};
/// An Iterator over Entry pointers.
pub const Iterator = Unmanaged.Iterator;
const Self = @This();
const Index = Unmanaged.Index;
/// Create an ArrayHashMap instance which will use a specified allocator.
pub fn init(allocator: *Allocator) Self {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call initContext instead.");
return initContext(allocator, undefined);
}
pub fn initContext(allocator: *Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.allocator = allocator,
.ctx = ctx,
};
}
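Editor's sketch of why this split exists: a zero-sized context can be inferred by init, but a context with runtime state must go through initContext. SeededContext and allocator below are hypothetical:

const SeededContext = struct {
    seed: u32,
    pub fn hash(self: @This(), k: u32) u32 {
        return (k *% 0x9E3779B9) ^ self.seed;
    }
    pub fn eql(self: @This(), a: u32, b: u32) bool {
        _ = self;
        return a == b;
    }
};
const SeededMap = std.ArrayHashMap(u32, u32, SeededContext, false);
var map = SeededMap.initContext(allocator, .{ .seed = 42 }); // .init() would @compileError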
/// `ArrayHashMap` takes ownership of the passed in array list. The array list must have
/// been allocated with `allocator`.
/// Deinitialize with `deinit`.
pub fn fromOwnedArrayList(allocator: *Allocator, entries: std.ArrayListUnmanaged(Entry)) !Self {
return Self{
.unmanaged = try Unmanaged.fromOwnedArrayList(allocator, entries),
.allocator = allocator,
};
}
/// Frees the backing allocation and leaves the map in an undefined state.
/// Note that this does not free keys or values. You must take care of that
/// before calling this function, if it is needed.
pub fn deinit(self: *Self) void {
self.unmanaged.deinit(self.allocator);
self.* = undefined;
}
/// Clears the map but retains the backing allocation for future use.
pub fn clearRetainingCapacity(self: *Self) void {
return self.unmanaged.clearRetainingCapacity();
}
/// Clears the map and releases the backing allocation
pub fn clearAndFree(self: *Self) void {
return self.unmanaged.clearAndFree(self.allocator);
}
/// Returns the number of KV pairs stored in this map.
pub fn count(self: Self) usize {
return self.unmanaged.count();
}
/// Returns the backing array of keys in this map.
/// Modifying the map may invalidate this array.
pub fn keys(self: Self) []K {
return self.unmanaged.keys();
}
/// Returns the backing array of values in this map.
/// Modifying the map may invalidate this array.
pub fn values(self: Self) []V {
return self.unmanaged.values();
}
/// Returns an iterator over the pairs in this map.
/// Modifying the map may invalidate this iterator.
pub fn iterator(self: *const Self) Iterator {
return Iterator{
.hm = self,
.index = 0,
};
return self.unmanaged.iterator();
}
/// If key exists this function cannot fail.
@@ -140,7 +182,10 @@ pub fn ArrayHashMap(
/// the `Entry` pointer points to it. Caller should then initialize
/// the value (but not the key).
pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
return self.unmanaged.getOrPut(self.allocator, key);
return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx);
}
pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) !GetOrPutResult {
return self.unmanaged.getOrPutContextAdapted(self.allocator, key, ctx, self.ctx);
}
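Editor's sketch of the adapted flow this enables: probe with a borrowed slice and only materialize an owned key on a miss. Assumes a StringArrayHashMap(u32) plus a borrowed slice and an allocator in scope; StringContext is the one defined earlier in this file:

const gop = try map.getOrPutAdapted(borrowed, StringContext{});
if (!gop.found_existing) {
    gop.key_ptr.* = try allocator.dupe(u8, borrowed); // own the key only on insert
    gop.value_ptr.* = 0; // caller initializes the value
}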
/// If there is an existing item with `key`, then the result
@@ -151,11 +196,13 @@ pub fn ArrayHashMap(
/// If a new entry needs to be stored, this function asserts there
/// is enough capacity to store it.
pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
return self.unmanaged.getOrPutAssumeCapacity(key);
return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx);
}
pub fn getOrPutValue(self: *Self, key: K, value: V) !*Entry {
return self.unmanaged.getOrPutValue(self.allocator, key, value);
pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
}
pub fn getOrPutValue(self: *Self, key: K, value: V) !GetOrPutResult {
return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx);
}
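Editor's sketch of the new result shape: GetOrPutResult exposes key_ptr, value_ptr, found_existing, and index instead of an *Entry, and getOrPutValue now returns the same structure (map/key hypothetical):

const gop = try map.getOrPut(key);
if (!gop.found_existing) gop.value_ptr.* = 0;
gop.value_ptr.* += 1; // e.g. a counter map

const res = try map.getOrPutValue(key, 0); // was !*Entry, now !GetOrPutResult
res.value_ptr.* += 1;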
/// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
@@ -164,14 +211,14 @@ pub fn ArrayHashMap(
/// Increases capacity, guaranteeing that insertions up until the
/// `expected_count` will not cause an allocation, and therefore cannot fail.
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) !void {
return self.unmanaged.ensureTotalCapacity(self.allocator, new_capacity);
return self.unmanaged.ensureTotalCapacityContext(self.allocator, new_capacity, self.ctx);
}
/// Increases capacity, guaranteeing that insertions up until
/// `additional_count` **more** items will not cause an allocation, and
/// therefore cannot fail.
pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) !void {
return self.unmanaged.ensureUnusedCapacity(self.allocator, additional_count);
return self.unmanaged.ensureUnusedCapacityContext(self.allocator, additional_count, self.ctx);
}
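Editor's sketch of the intended pairing (items is a hypothetical slice of structs with key/value fields): reserve once, then use the non-failing AssumeCapacity variants:

try map.ensureUnusedCapacity(items.len);
for (items) |item| {
    map.putAssumeCapacity(item.key, item.value); // cannot fail, cannot allocate
}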
/// Returns the number of total elements which may be present before it is
@@ -183,119 +230,187 @@ pub fn ArrayHashMap(
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPut`.
pub fn put(self: *Self, key: K, value: V) !void {
return self.unmanaged.put(self.allocator, key, value);
return self.unmanaged.putContext(self.allocator, key, value, self.ctx);
}
/// Inserts a key-value pair into the hash map, asserting that no previous
/// entry with the same key is already present
pub fn putNoClobber(self: *Self, key: K, value: V) !void {
return self.unmanaged.putNoClobber(self.allocator, key, value);
return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx);
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
return self.unmanaged.putAssumeCapacity(key, value);
return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx);
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Asserts that it does not clobber any existing data.
/// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
return self.unmanaged.putAssumeCapacityNoClobber(key, value);
return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx);
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
pub fn fetchPut(self: *Self, key: K, value: V) !?Entry {
return self.unmanaged.fetchPut(self.allocator, key, value);
pub fn fetchPut(self: *Self, key: K, value: V) !?KV {
return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx);
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
/// If insertion happens, asserts there is enough capacity without allocating.
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry {
return self.unmanaged.fetchPutAssumeCapacity(key, value);
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx);
}
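Editor's sketch: the fetchPut family now returns the previous pair as a KV copied out of the backing store, so it remains valid after the table is modified (name/new_value are placeholders; assumes string keys and integer values):

if (try map.fetchPut(name, new_value)) |old| {
    std.debug.print("replaced {s}: {} -> {}\n", .{ old.key, old.value, new_value });
}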
pub fn getEntry(self: Self, key: K) ?*Entry {
return self.unmanaged.getEntry(key);
/// Finds pointers to the key and value storage associated with a key.
pub fn getEntry(self: Self, key: K) ?Entry {
return self.unmanaged.getEntryContext(key, self.ctx);
}
pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
return self.unmanaged.getEntryAdapted(key, ctx);
}
/// Finds the index in the `entries` array where a key is stored
pub fn getIndex(self: Self, key: K) ?usize {
return self.unmanaged.getIndex(key);
return self.unmanaged.getIndexContext(key, self.ctx);
}
pub fn getIndexAdapted(self: Self, key: anytype, ctx: anytype) ?usize {
return self.unmanaged.getIndexAdapted(key, ctx);
}
/// Find the value associated with a key
pub fn get(self: Self, key: K) ?V {
return self.unmanaged.get(key);
return self.unmanaged.getContext(key, self.ctx);
}
pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
return self.unmanaged.getAdapted(key, ctx);
}
/// Find a pointer to the value associated with a key
pub fn getPtr(self: Self, key: K) ?*V {
return self.unmanaged.getPtrContext(key, self.ctx);
}
pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
return self.unmanaged.getPtrAdapted(key, ctx);
}
/// Check whether a key is stored in the map
pub fn contains(self: Self, key: K) bool {
return self.unmanaged.contains(key);
return self.unmanaged.containsContext(key, self.ctx);
}
pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.containsAdapted(key, ctx);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and then returned from this function. The entry is
/// removed from the underlying array by swapping it with the last
/// element.
pub fn swapRemove(self: *Self, key: K) ?Entry {
return self.unmanaged.swapRemove(key);
pub fn fetchSwapRemove(self: *Self, key: K) ?KV {
return self.unmanaged.fetchSwapRemoveContext(key, self.ctx);
}
pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
return self.unmanaged.fetchSwapRemoveContextAdapted(key, ctx, self.ctx);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and then returned from this function. The entry is
/// removed from the underlying array by shifting all elements forward
/// thereby maintaining the current ordering.
pub fn orderedRemove(self: *Self, key: K) ?Entry {
return self.unmanaged.orderedRemove(key);
pub fn fetchOrderedRemove(self: *Self, key: K) ?KV {
return self.unmanaged.fetchOrderedRemoveContext(key, self.ctx);
}
pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
return self.unmanaged.fetchOrderedRemoveContextAdapted(key, ctx, self.ctx);
}
/// TODO: deprecated: call swapRemoveAssertDiscard instead.
pub fn removeAssertDiscard(self: *Self, key: K) void {
return self.unmanaged.removeAssertDiscard(key);
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map. The entry is removed from the underlying array
/// by swapping it with the last element. Returns true if an entry
/// was removed, false otherwise.
pub fn swapRemove(self: *Self, key: K) bool {
return self.unmanaged.swapRemoveContext(key, self.ctx);
}
pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.swapRemoveContextAdapted(key, ctx, self.ctx);
}
/// Asserts there is an `Entry` with matching key, deletes it from the hash map
/// by swapping it with the last element, and discards it.
pub fn swapRemoveAssertDiscard(self: *Self, key: K) void {
return self.unmanaged.swapRemoveAssertDiscard(key);
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map. The entry is removed from the underlying array
/// by shifting all elements forward, thereby maintaining the
/// current ordering. Returns true if an entry was removed, false otherwise.
pub fn orderedRemove(self: *Self, key: K) bool {
return self.unmanaged.orderedRemoveContext(key, self.ctx);
}
pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.orderedRemoveContextAdapted(key, ctx, self.ctx);
}
/// Asserts there is an `Entry` with matching key, deletes it from the hash map
/// by shifting all elements forward, thereby maintaining the current ordering.
pub fn orderedRemoveAssertDiscard(self: *Self, key: K) void {
return self.unmanaged.orderedRemoveAssertDiscard(key);
/// Deletes the item at the specified index in `entries` from
/// the hash map. The entry is removed from the underlying array
/// by swapping it with the last element.
pub fn swapRemoveAt(self: *Self, index: usize) void {
self.unmanaged.swapRemoveAtContext(index, self.ctx);
}
pub fn items(self: Self) []Entry {
return self.unmanaged.items();
/// Deletes the item at the specified index in `entries` from
/// the hash map. The entry is removed from the underlying array
/// by shifting all elements forward, thereby maintaining the
/// current ordering.
pub fn orderedRemoveAt(self: *Self, index: usize) void {
self.unmanaged.orderedRemoveAtContext(index, self.ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context and allocator as this instance.
pub fn clone(self: Self) !Self {
var other = try self.unmanaged.clone(self.allocator);
return other.promote(self.allocator);
var other = try self.unmanaged.cloneContext(self.allocator, self.ctx);
return other.promoteContext(self.allocator, self.ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context as this instance, but the specified
/// allocator.
pub fn cloneWithAllocator(self: Self, allocator: *Allocator) !Self {
var other = try self.unmanaged.cloneContext(allocator, self.ctx);
return other.promoteContext(allocator, self.ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same allocator as this instance, but the
/// specified context.
pub fn cloneWithContext(self: Self, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
var other = try self.unmanaged.cloneContext(self.allocator, ctx);
return other.promoteContext(self.allocator, ctx);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the specified allocator and context.
pub fn cloneWithAllocatorAndContext(self: Self, allocator: *Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
var other = try self.unmanaged.cloneContext(allocator, ctx);
return other.promoteContext(allocator, ctx);
}
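Editor's sketch of the four clone flavors added here (arena and Ctx are hypothetical stand-ins):

var a = try map.clone(); // same allocator, same context
var b = try map.cloneWithAllocator(&arena.allocator); // different allocator
var c = try map.cloneWithContext(Ctx{}); // different context type
var d = try map.cloneWithAllocatorAndContext(&arena.allocator, Ctx{});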
/// Rebuilds the key indexes. If the underlying entries have been modified directly, users
/// can call `reIndex` to update the indexes to account for these new entries.
pub fn reIndex(self: *Self) !void {
return self.unmanaged.reIndex(self.allocator);
return self.unmanaged.reIndexContext(self.allocator, self.ctx);
}
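Editor's sketch of that contract, assuming a map whose store_hash is false (e.g. u32 keys) so Data.hash is void, and a hypothetical allocator in scope:

var map = std.AutoArrayHashMap(u32, u32).init(allocator);
defer map.deinit();
try map.unmanaged.entries.append(map.allocator, .{ .hash = {}, .key = 1, .value = 10 });
try map.reIndex(); // the index now accounts for the directly-appended entry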
/// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
/// index entries. Keeps capacity the same.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
return self.unmanaged.shrinkRetainingCapacity(new_len);
return self.unmanaged.shrinkRetainingCapacityContext(new_len, self.ctx);
}
/// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
/// index entries. Reduces allocated capacity.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
return self.unmanaged.shrinkAndFree(self.allocator, new_len);
return self.unmanaged.shrinkAndFreeContext(self.allocator, new_len, self.ctx);
}
/// Removes the last inserted `Entry` in the hash map and returns it.
pub fn pop(self: *Self) Entry {
return self.unmanaged.pop();
pub fn pop(self: *Self) KV {
return self.unmanaged.popContext(self.ctx);
}
};
}
@@ -317,16 +432,23 @@ pub fn ArrayHashMap(
/// functions. It does not store each item's hash in the table. Setting `store_hash`
/// to `true` incurs slightly more memory cost by storing each key's hash in the table
/// but guarantees only one call to `eql` per insertion/deletion.
/// Context must be a struct type with two member functions:
/// hash(self, K) u32
/// eql(self, K, K) bool
/// Adapted variants of many functions are provided. These variants
/// take a pseudo key instead of a key. Their context must have the functions:
/// hash(self, PseudoKey) u32
/// eql(self, PseudoKey, K) bool
pub fn ArrayHashMapUnmanaged(
comptime K: type,
comptime V: type,
comptime hash: fn (key: K) u32,
comptime eql: fn (a: K, b: K) bool,
comptime Context: type,
comptime store_hash: bool,
) type {
comptime std.hash_map.verifyContext(Context, K, K, u32);
return struct {
/// It is permitted to access this field directly.
entries: std.ArrayListUnmanaged(Entry) = .{},
entries: DataList = .{},
/// When entries length is less than `linear_scan_max`, this remains `null`.
/// Once entries length grows big enough, this field is allocated. There is
@@ -334,26 +456,54 @@ pub fn ArrayHashMapUnmanaged(
/// by how many total indexes there are.
index_header: ?*IndexHeader = null,
/// Modifying the key is illegal behavior.
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
pub const Entry = struct {
/// This field is `void` if `store_hash` is `false`.
key_ptr: *K,
value_ptr: *V,
};
/// A KV pair which has been copied out of the backing store
pub const KV = struct {
key: K,
value: V,
};
/// The Data type used for the MultiArrayList backing this map
pub const Data = struct {
hash: Hash,
key: K,
value: V,
};
/// The MultiArrayList type backing this map
pub const DataList = std.MultiArrayList(Data);
/// The stored hash type, either u32 or void.
pub const Hash = if (store_hash) u32 else void;
/// getOrPut variants return this structure, with pointers
/// to the backing store and a flag to indicate whether an
/// existing entry was found.
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
/// unless `ensureCapacity` was previously used.
pub const GetOrPutResult = struct {
entry: *Entry,
key_ptr: *K,
value_ptr: *V,
found_existing: bool,
index: usize,
};
pub const Managed = ArrayHashMap(K, V, hash, eql, store_hash);
/// The ArrayHashMap type using the same settings as this managed map.
pub const Managed = ArrayHashMap(K, V, Context, store_hash);
/// Some functions require a context only if hashes are not stored.
/// To keep the API simple, this type is only used internally.
const ByIndexContext = if (store_hash) void else Context;
const Self = @This();
@@ -362,25 +512,26 @@ pub fn ArrayHashMapUnmanaged(
const RemovalType = enum {
swap,
ordered,
index_only,
};
/// Convert from an unmanaged map to a managed map. After calling this,
/// the promoted map should no longer be used.
pub fn promote(self: Self, allocator: *Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call promoteContext instead.");
return self.promoteContext(allocator, undefined);
}
pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = allocator,
.ctx = ctx,
};
}
/// `ArrayHashMapUnmanaged` takes ownership of the passed in array list. The array list must
/// have been allocated with `allocator`.
/// Deinitialize with `deinit`.
pub fn fromOwnedArrayList(allocator: *Allocator, entries: std.ArrayListUnmanaged(Entry)) !Self {
var array_hash_map = Self{ .entries = entries };
try array_hash_map.reIndex(allocator);
return array_hash_map;
}
/// Frees the backing allocation and leaves the map in an undefined state.
/// Note that this does not free keys or values. You must take care of that
/// before calling this function, if it is needed.
pub fn deinit(self: *Self, allocator: *Allocator) void {
self.entries.deinit(allocator);
if (self.index_header) |header| {
@@ -389,19 +540,19 @@ pub fn ArrayHashMapUnmanaged(
self.* = undefined;
}
/// Clears the map but retains the backing allocation for future use.
pub fn clearRetainingCapacity(self: *Self) void {
self.entries.items.len = 0;
self.entries.len = 0;
if (self.index_header) |header| {
header.max_distance_from_start_index = 0;
switch (header.capacityIndexType()) {
.u8 => mem.set(Index(u8), header.indexes(u8), Index(u8).empty),
.u16 => mem.set(Index(u16), header.indexes(u16), Index(u16).empty),
.u32 => mem.set(Index(u32), header.indexes(u32), Index(u32).empty),
.usize => mem.set(Index(usize), header.indexes(usize), Index(usize).empty),
}
}
}
/// Clears the map and releases the backing allocation
pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
self.entries.shrinkAndFree(allocator, 0);
if (self.index_header) |header| {
@@ -410,10 +561,55 @@ pub fn ArrayHashMapUnmanaged(
}
}
/// Returns the number of KV pairs stored in this map.
pub fn count(self: Self) usize {
return self.entries.items.len;
return self.entries.len;
}
/// Returns the backing array of keys in this map.
/// Modifying the map may invalidate this array.
pub fn keys(self: Self) []K {
return self.entries.items(.key);
}
/// Returns the backing array of values in this map.
/// Modifying the map may invalidate this array.
pub fn values(self: Self) []V {
return self.entries.items(.value);
}
/// Returns an iterator over the pairs in this map.
/// Modifying the map may invalidate this iterator.
pub fn iterator(self: Self) Iterator {
const slice = self.entries.slice();
return .{
.keys = slice.items(.key).ptr,
.values = slice.items(.value).ptr,
.len = @intCast(u32, slice.len),
};
}
pub const Iterator = struct {
keys: [*]K,
values: [*]V,
len: u32,
index: u32 = 0,
pub fn next(it: *Iterator) ?Entry {
if (it.index >= it.len) return null;
const result = Entry{
.key_ptr = &it.keys[it.index],
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &it.values[it.index],
};
it.index += 1;
return result;
}
/// Reset the iterator to the initial index
pub fn reset(it: *Iterator) void {
it.index = 0;
}
};
/// If key exists this function cannot fail.
/// If there is an existing item with `key`, then the result
/// `Entry` pointer points to it, and found_existing is true.
@@ -421,16 +617,36 @@ pub fn ArrayHashMapUnmanaged(
/// the `Entry` pointer points to it. Caller should then initialize
/// the value (but not the key).
pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
self.ensureCapacity(allocator, self.entries.items.len + 1) catch |err| {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContext instead.");
return self.getOrPutContext(allocator, key, undefined);
}
pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContextAdapted instead.");
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
self.ensureTotalCapacityContext(allocator, self.entries.len + 1, ctx) catch |err| {
// "If key exists this function cannot fail."
const index = self.getIndex(key) orelse return err;
const index = self.getIndexAdapted(key, key_ctx) orelse return err;
const slice = self.entries.slice();
return GetOrPutResult{
.entry = &self.entries.items[index],
.key_ptr = &slice.items(.key)[index],
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[index],
.found_existing = true,
.index = index,
};
};
return self.getOrPutAssumeCapacity(key);
return self.getOrPutAssumeCapacityAdapted(key, key_ctx);
}
/// If there is an existing item with `key`, then the result
@@ -441,45 +657,75 @@ pub fn ArrayHashMapUnmanaged(
/// If a new entry needs to be stored, this function asserts there
/// is enough capacity to store it.
pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutAssumeCapacityContext instead.");
return self.getOrPutAssumeCapacityContext(key, undefined);
}
pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult {
const gop = self.getOrPutAssumeCapacityAdapted(key, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
/// If there is an existing item with `key`, then the result
/// `Entry` pointers point to it, and found_existing is true.
/// Otherwise, puts a new item with undefined key and value, and
/// the `Entry` pointers point to it. Caller must then initialize
/// both the key and the value.
/// If a new entry needs to be stored, this function asserts there
/// is enough capacity to store it.
pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
const header = self.index_header orelse {
// Linear scan.
const h = if (store_hash) hash(key) else {};
for (self.entries.items) |*item, i| {
if (item.hash == h and eql(key, item.key)) {
const h = if (store_hash) checkedHash(ctx, key) else {};
const slice = self.entries.slice();
const hashes_array = slice.items(.hash);
const keys_array = slice.items(.key);
for (keys_array) |*item_key, i| {
if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*)) {
return GetOrPutResult{
.entry = item,
.key_ptr = item_key,
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[i],
.found_existing = true,
.index = i,
};
}
}
const new_entry = self.entries.addOneAssumeCapacity();
new_entry.* = .{
.hash = if (store_hash) h else {},
.key = key,
.value = undefined,
};
const index = self.entries.addOneAssumeCapacity();
// unsafe indexing because the length changed
if (store_hash) hashes_array.ptr[index] = h;
return GetOrPutResult{
.entry = new_entry,
.key_ptr = &keys_array.ptr[index],
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value).ptr[index],
.found_existing = false,
.index = self.entries.items.len - 1,
.index = index,
};
};
switch (header.capacityIndexType()) {
.u8 => return self.getOrPutInternal(key, header, u8),
.u16 => return self.getOrPutInternal(key, header, u16),
.u32 => return self.getOrPutInternal(key, header, u32),
.usize => return self.getOrPutInternal(key, header, usize),
.u8 => return self.getOrPutInternal(key, ctx, header, u8),
.u16 => return self.getOrPutInternal(key, ctx, header, u16),
.u32 => return self.getOrPutInternal(key, ctx, header, u32),
}
}
pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !*Entry {
const res = try self.getOrPut(allocator, key);
if (!res.found_existing)
res.entry.value = value;
return res.entry;
pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutValueContext instead.");
return self.getOrPutValueContext(allocator, key, value, undefined);
}
pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!res.found_existing) {
res.key_ptr.* = key;
res.value_ptr.* = value;
}
return res;
}
/// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`.
@@ -488,30 +734,30 @@ pub fn ArrayHashMapUnmanaged(
/// Increases capacity, guaranteeing that insertions up until the
/// `expected_count` will not cause an allocation, and therefore cannot fail.
pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
try self.entries.ensureTotalCapacity(allocator, new_capacity);
if (new_capacity <= linear_scan_max) return;
// Ensure that the indexes will be at most 60% full if
// `new_capacity` items are put into it.
const needed_len = new_capacity * 5 / 3;
if (self.index_header) |header| {
if (needed_len > header.indexes_len) {
// An overflow here would mean the amount of memory required would not
// be representable in the address space.
const new_indexes_len = math.ceilPowerOfTwo(usize, needed_len) catch unreachable;
const new_header = try IndexHeader.alloc(allocator, new_indexes_len);
self.insertAllEntriesIntoNewHeader(new_header);
header.free(allocator);
self.index_header = new_header;
}
} else {
// An overflow here would mean the amount of memory required would not
// be representable in the address space.
const new_indexes_len = math.ceilPowerOfTwo(usize, needed_len) catch unreachable;
const header = try IndexHeader.alloc(allocator, new_indexes_len);
self.insertAllEntriesIntoNewHeader(header);
self.index_header = header;
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call ensureTotalCapacityContext instead.");
return self.ensureTotalCapacityContext(allocator, new_capacity, undefined);
}
pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
if (new_capacity <= linear_scan_max) {
try self.entries.ensureCapacity(allocator, new_capacity);
return;
}
if (self.index_header) |header| {
if (new_capacity <= header.capacity()) {
try self.entries.ensureCapacity(allocator, new_capacity);
return;
}
}
const new_bit_index = try IndexHeader.findBitIndex(new_capacity);
const new_header = try IndexHeader.alloc(allocator, new_bit_index);
try self.entries.ensureCapacity(allocator, new_capacity);
if (self.index_header) |old_header| old_header.free(allocator);
self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
self.index_header = new_header;
}
/// Increases capacity, guaranteeing that insertions up until
@@ -522,7 +768,17 @@ pub fn ArrayHashMapUnmanaged(
allocator: *Allocator,
additional_capacity: usize,
) !void {
return self.ensureTotalCapacity(allocator, self.count() + additional_capacity);
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call ensureTotalCapacityContext instead.");
return self.ensureUnusedCapacityContext(allocator, additional_capacity, undefined);
}
pub fn ensureUnusedCapacityContext(
self: *Self,
allocator: *Allocator,
additional_capacity: usize,
ctx: Context,
) !void {
return self.ensureTotalCapacityContext(allocator, self.count() + additional_capacity, ctx);
}
/// Returns the number of total elements which may be present before it is
@@ -530,141 +786,321 @@ pub fn ArrayHashMapUnmanaged(
pub fn capacity(self: Self) usize {
const entry_cap = self.entries.capacity;
const header = self.index_header orelse return math.min(linear_scan_max, entry_cap);
const indexes_cap = (header.indexes_len + 1) * 3 / 4;
const indexes_cap = header.capacity();
return math.min(entry_cap, indexes_cap);
}
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPut`.
pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
const result = try self.getOrPut(allocator, key);
result.entry.value = value;
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putContext instead.");
return self.putContext(allocator, key, value, undefined);
}
pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
result.value_ptr.* = value;
}
/// Inserts a key-value pair into the hash map, asserting that no previous
/// entry with the same key is already present
pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
const result = try self.getOrPut(allocator, key);
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putNoClobberContext instead.");
return self.putNoClobberContext(allocator, key, value, undefined);
}
pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
assert(!result.found_existing);
result.entry.value = value;
result.value_ptr.* = value;
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
const result = self.getOrPutAssumeCapacity(key);
result.entry.value = value;
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityContext instead.");
return self.putAssumeCapacityContext(key, value, undefined);
}
pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void {
const result = self.getOrPutAssumeCapacityContext(key, ctx);
result.value_ptr.* = value;
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Asserts that it does not clobber any existing data.
/// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
const result = self.getOrPutAssumeCapacity(key);
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityNoClobberContext instead.");
return self.putAssumeCapacityNoClobberContext(key, value, undefined);
}
pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void {
const result = self.getOrPutAssumeCapacityContext(key, ctx);
assert(!result.found_existing);
result.entry.value = value;
result.value_ptr.* = value;
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?Entry {
const gop = try self.getOrPut(allocator, key);
var result: ?Entry = null;
pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchPutContext instead.");
return self.fetchPutContext(allocator, key, value, undefined);
}
pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
const gop = try self.getOrPutContext(allocator, key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
result = gop.entry.*;
result = KV{
.key = gop.key_ptr.*,
.value = gop.value_ptr.*,
};
}
gop.entry.value = value;
gop.value_ptr.* = value;
return result;
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
/// If insertion happens, asserts there is enough capacity without allocating.
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry {
const gop = self.getOrPutAssumeCapacity(key);
var result: ?Entry = null;
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchPutAssumeCapacityContext instead.");
return self.fetchPutAssumeCapacityContext(key, value, undefined);
}
pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV {
const gop = self.getOrPutAssumeCapacityContext(key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
result = gop.entry.*;
result = KV{
.key = gop.key_ptr.*,
.value = gop.value_ptr.*,
};
}
gop.entry.value = value;
gop.value_ptr.* = value;
return result;
}
pub fn getEntry(self: Self, key: K) ?*Entry {
const index = self.getIndex(key) orelse return null;
return &self.entries.items[index];
/// Finds pointers to the key and value storage associated with a key.
pub fn getEntry(self: Self, key: K) ?Entry {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getEntryContext instead.");
return self.getEntryContext(key, undefined);
}
pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry {
return self.getEntryAdapted(key, ctx);
}
pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
const index = self.getIndexAdapted(key, ctx) orelse return null;
const slice = self.entries.slice();
return Entry{
.key_ptr = &slice.items(.key)[index],
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &slice.items(.value)[index],
};
}
/// Finds the index in the `entries` array where a key is stored
pub fn getIndex(self: Self, key: K) ?usize {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getIndexContext instead.");
return self.getIndexContext(key, undefined);
}
pub fn getIndexContext(self: Self, key: K, ctx: Context) ?usize {
return self.getIndexAdapted(key, ctx);
}
pub fn getIndexAdapted(self: Self, key: anytype, ctx: anytype) ?usize {
const header = self.index_header orelse {
// Linear scan.
const h = if (store_hash) hash(key) else {};
for (self.entries.items) |*item, i| {
if (item.hash == h and eql(key, item.key)) {
const h = if (store_hash) checkedHash(ctx, key) else {};
const slice = self.entries.slice();
const hashes_array = slice.items(.hash);
const keys_array = slice.items(.key);
for (keys_array) |*item_key, i| {
if (hashes_array[i] == h and checkedEql(ctx, key, item_key.*)) {
return i;
}
}
return null;
};
switch (header.capacityIndexType()) {
.u8 => return self.getInternal(key, header, u8),
.u16 => return self.getInternal(key, header, u16),
.u32 => return self.getInternal(key, header, u32),
.usize => return self.getInternal(key, header, usize),
.u8 => return self.getIndexWithHeaderGeneric(key, ctx, header, u8),
.u16 => return self.getIndexWithHeaderGeneric(key, ctx, header, u16),
.u32 => return self.getIndexWithHeaderGeneric(key, ctx, header, u32),
}
}
pub fn get(self: Self, key: K) ?V {
return if (self.getEntry(key)) |entry| entry.value else null;
fn getIndexWithHeaderGeneric(self: Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) ?usize {
const indexes = header.indexes(I);
const slot = self.getSlotByKey(key, ctx, header, I, indexes) orelse return null;
return indexes[slot].entry_index;
}
/// Find the value associated with a key
pub fn get(self: Self, key: K) ?V {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getContext instead.");
return self.getContext(key, undefined);
}
pub fn getContext(self: Self, key: K, ctx: Context) ?V {
return self.getAdapted(key, ctx);
}
pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
const index = self.getIndexAdapted(key, ctx) orelse return null;
return self.values()[index];
}
/// Find a pointer to the value associated with a key
pub fn getPtr(self: Self, key: K) ?*V {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getPtrContext instead.");
return self.getPtrContext(key, undefined);
}
pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V {
return self.getPtrAdapted(key, ctx);
}
pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
const index = self.getIndexAdapted(key, ctx) orelse return null;
// workaround for #6974
return if (@sizeOf(*V) == 0) @as(*V, undefined) else &self.values()[index];
}
/// Check whether a key is stored in the map
pub fn contains(self: Self, key: K) bool {
return self.getEntry(key) != null;
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call containsContext instead.");
return self.containsContext(key, undefined);
}
pub fn containsContext(self: Self, key: K, ctx: Context) bool {
return self.containsAdapted(key, ctx);
}
pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
return self.getIndexAdapted(key, ctx) != null;
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and then returned from this function. The entry is
/// removed from the underlying array by swapping it with the last
/// element.
pub fn swapRemove(self: *Self, key: K) ?Entry {
return self.removeInternal(key, .swap);
pub fn fetchSwapRemove(self: *Self, key: K) ?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchSwapRemoveContext instead.");
return self.fetchSwapRemoveContext(key, undefined);
}
pub fn fetchSwapRemoveContext(self: *Self, key: K, ctx: Context) ?KV {
return self.fetchSwapRemoveContextAdapted(key, ctx, ctx);
}
pub fn fetchSwapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchSwapRemoveContextAdapted instead.");
return self.fetchSwapRemoveContextAdapted(key, ctx, undefined);
}
pub fn fetchSwapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV {
return self.fetchRemoveByKey(key, key_ctx, if (store_hash) {} else ctx, .swap);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and then returned from this function. The entry is
/// removed from the underlying array by shifting all elements forward
/// thereby maintaining the current ordering.
pub fn orderedRemove(self: *Self, key: K) ?Entry {
return self.removeInternal(key, .ordered);
pub fn fetchOrderedRemove(self: *Self, key: K) ?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchOrderedRemoveContext instead.");
return self.fetchOrderedRemoveContext(key, undefined);
}
pub fn fetchOrderedRemoveContext(self: *Self, key: K, ctx: Context) ?KV {
return self.fetchOrderedRemoveContextAdapted(key, ctx, ctx);
}
pub fn fetchOrderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchOrderedRemoveContextAdapted instead.");
return self.fetchOrderedRemoveContextAdapted(key, ctx, undefined);
}
pub fn fetchOrderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) ?KV {
return self.fetchRemoveByKey(key, key_ctx, if (store_hash) {} else ctx, .ordered);
}
/// TODO deprecated: call swapRemoveAssertDiscard instead.
pub fn removeAssertDiscard(self: *Self, key: K) void {
return self.swapRemoveAssertDiscard(key);
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map. The entry is removed from the underlying array
/// by swapping it with the last element. Returns true if an entry
/// was removed, false otherwise.
pub fn swapRemove(self: *Self, key: K) bool {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call swapRemoveContext instead.");
return self.swapRemoveContext(key, undefined);
}
pub fn swapRemoveContext(self: *Self, key: K, ctx: Context) bool {
return self.swapRemoveContextAdapted(key, ctx, ctx);
}
pub fn swapRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call swapRemoveContextAdapted instead.");
return self.swapRemoveContextAdapted(key, ctx, undefined);
}
pub fn swapRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool {
return self.removeByKey(key, key_ctx, if (store_hash) {} else ctx, .swap);
}
/// Asserts there is an `Entry` with matching key, deletes it from the hash map
/// by swapping it with the last element, and discards it.
pub fn swapRemoveAssertDiscard(self: *Self, key: K) void {
assert(self.swapRemove(key) != null);
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map. The entry is removed from the underlying array
/// by shifting all elements forward, thereby maintaining the
/// current ordering. Returns true if an entry was removed, false otherwise.
pub fn orderedRemove(self: *Self, key: K) bool {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call orderedRemoveContext instead.");
return self.orderedRemoveContext(key, undefined);
}
pub fn orderedRemoveContext(self: *Self, key: K, ctx: Context) bool {
return self.orderedRemoveContextAdapted(key, ctx, ctx);
}
pub fn orderedRemoveAdapted(self: *Self, key: anytype, ctx: anytype) bool {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call orderedRemoveContextAdapted instead.");
return self.orderedRemoveContextAdapted(key, ctx, undefined);
}
pub fn orderedRemoveContextAdapted(self: *Self, key: anytype, key_ctx: anytype, ctx: Context) bool {
return self.removeByKey(key, key_ctx, if (store_hash) {} else ctx, .ordered);
}
/// Asserts there is an `Entry` with matching key, deletes it from the hash map
/// by shifting all elements forward, thereby maintaining the current ordering.
pub fn orderedRemoveAssertDiscard(self: *Self, key: K) void {
assert(self.orderedRemove(key) != null);
/// Deletes the item at the specified index in `entries` from
/// the hash map. The entry is removed from the underlying array
/// by swapping it with the last element.
pub fn swapRemoveAt(self: *Self, index: usize) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call swapRemoveAtContext instead.");
return self.swapRemoveAtContext(index, undefined);
}
pub fn swapRemoveAtContext(self: *Self, index: usize, ctx: Context) void {
self.removeByIndex(index, if (store_hash) {} else ctx, .swap);
}
pub fn items(self: Self) []Entry {
return self.entries.items;
/// Deletes the item at the specified index in `entries` from
/// the hash map. The entry is removed from the underlying array
/// by shifting all elements forward, thereby maintaining the
/// current ordering.
pub fn orderedRemoveAt(self: *Self, index: usize) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call orderedRemoveAtContext instead.");
return self.orderedRemoveAtContext(index, undefined);
}
pub fn orderedRemoveAtContext(self: *Self, index: usize, ctx: Context) void {
self.removeByIndex(index, if (store_hash) {} else ctx, .ordered);
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context and allocator as this instance.
pub fn clone(self: Self, allocator: *Allocator) !Self {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call cloneContext instead.");
return self.cloneContext(allocator, undefined);
}
pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self {
var other: Self = .{};
try other.entries.appendSlice(allocator, self.entries.items);
other.entries = try self.entries.clone(allocator);
errdefer other.entries.deinit(allocator);
if (self.index_header) |header| {
const new_header = try IndexHeader.alloc(allocator, header.indexes_len);
other.insertAllEntriesIntoNewHeader(new_header);
const new_header = try IndexHeader.alloc(allocator, header.bit_index);
other.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
other.index_header = new_header;
}
return other;
@@ -673,135 +1109,197 @@ pub fn ArrayHashMapUnmanaged(
/// Rebuilds the key indexes. If the underlying entries have been modified directly, users
/// can call `reIndex` to update the indexes to account for these new entries.
pub fn reIndex(self: *Self, allocator: *Allocator) !void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call reIndexContext instead.");
return self.reIndexContext(allocator, undefined);
}
pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void {
if (self.entries.capacity <= linear_scan_max) return;
// We're going to rebuild the index header and replace the existing one (if any). The
// indexes should be sized such that they will be at most 60% full.
const needed_len = self.entries.capacity * 5 / 3;
const new_indexes_len = math.ceilPowerOfTwo(usize, needed_len) catch unreachable;
const new_header = try IndexHeader.alloc(allocator, new_indexes_len);
self.insertAllEntriesIntoNewHeader(new_header);
if (self.index_header) |header|
header.free(allocator);
const bit_index = try IndexHeader.findBitIndex(self.entries.capacity);
const new_header = try IndexHeader.alloc(allocator, bit_index);
if (self.index_header) |header| header.free(allocator);
self.insertAllEntriesIntoNewHeader(if (store_hash) {} else ctx, new_header);
self.index_header = new_header;
}
/// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
/// index entries. Keeps capacity the same.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call shrinkRetainingCapacityContext instead.");
return self.shrinkRetainingCapacityContext(new_len, undefined);
}
pub fn shrinkRetainingCapacityContext(self: *Self, new_len: usize, ctx: Context) void {
// Remove index entries from the new length onwards.
// Explicitly choose to ONLY remove index entries and not the underlying array list
// entries as we're going to remove them in the subsequent shrink call.
var i: usize = new_len;
while (i < self.entries.items.len) : (i += 1)
_ = self.removeWithHash(self.entries.items[i].key, self.entries.items[i].hash, .index_only);
if (self.index_header) |header| {
var i: usize = new_len;
while (i < self.entries.len) : (i += 1)
self.removeFromIndexByIndex(i, if (store_hash) {} else ctx, header);
}
self.entries.shrinkRetainingCapacity(new_len);
}
/// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
/// index entries. Reduces allocated capacity.
pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call shrinkAndFreeContext instead.");
return self.shrinkAndFreeContext(allocator, new_len, undefined);
}
pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void {
// Remove index entries from the new length onwards.
// Explicitly choose to ONLY remove index entries and not the underlying array list
// entries as we're going to remove them in the subsequent shrink call.
var i: usize = new_len;
while (i < self.entries.items.len) : (i += 1)
_ = self.removeWithHash(self.entries.items[i].key, self.entries.items[i].hash, .index_only);
if (self.index_header) |header| {
var i: usize = new_len;
while (i < self.entries.len) : (i += 1)
self.removeFromIndexByIndex(i, if (store_hash) {} else ctx, header);
}
self.entries.shrinkAndFree(allocator, new_len);
}
/// Removes the last inserted `Entry` in the hash map and returns it.
pub fn pop(self: *Self) Entry {
const top = self.entries.items[self.entries.items.len - 1];
_ = self.removeWithHash(top.key, top.hash, .index_only);
self.entries.items.len -= 1;
return top;
pub fn pop(self: *Self) KV {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call popContext instead.");
return self.popContext(undefined);
}
pub fn popContext(self: *Self, ctx: Context) KV {
const item = self.entries.get(self.entries.len-1);
if (self.index_header) |header|
self.removeFromIndexByIndex(self.entries.len-1, if (store_hash) {} else ctx, header);
self.entries.len -= 1;
return .{
.key = item.key,
.value = item.value,
};
}
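Editor's sketch: pop returns a KV copy of the most recently inserted pair, so the values remain usable after removal (map hypothetical, assumed non-empty checks via count):

while (map.count() > 0) {
    const kv = map.pop();
    std.debug.print("{} -> {}\n", .{ kv.key, kv.value });
}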
fn removeInternal(self: *Self, key: K, comptime removal_type: RemovalType) ?Entry {
const key_hash = if (store_hash) hash(key) else {};
return self.removeWithHash(key, key_hash, removal_type);
}
// ------------------ No pub fns below this point ------------------
fn removeWithHash(self: *Self, key: K, key_hash: Hash, comptime removal_type: RemovalType) ?Entry {
fn fetchRemoveByKey(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, comptime removal_type: RemovalType) ?KV {
const header = self.index_header orelse {
// If we're only removing index entries and we have no index header, there's no need
// to continue.
if (removal_type == .index_only) return null;
// Linear scan.
for (self.entries.items) |item, i| {
if (item.hash == key_hash and eql(key, item.key)) {
const key_hash = if (store_hash) key_ctx.hash(key) else {};
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
for (keys_array) |*item_key, i| {
const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
if (hash_match and key_ctx.eql(key, item_key.*)) {
const removed_entry: KV = .{
.key = keys_array[i],
.value = slice.items(.value)[i],
};
switch (removal_type) {
.swap => self.entries.swapRemove(i),
.ordered => self.entries.orderedRemove(i),
}
return removed_entry;
}
}
return null;
};
return switch (header.capacityIndexType()) {
.u8 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u8, removal_type),
.u16 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u16, removal_type),
.u32 => self.fetchRemoveByKeyGeneric(key, key_ctx, ctx, header, u32, removal_type),
};
}
fn fetchRemoveByKeyGeneric(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) ?KV {
const indexes = header.indexes(I);
const entry_index = self.removeFromIndexByKey(key, key_ctx, header, I, indexes) orelse return null;
const slice = self.entries.slice();
const removed_entry: KV = .{
.key = slice.items(.key)[entry_index],
.value = slice.items(.value)[entry_index],
};
self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type);
return removed_entry;
}
fn removeByKey(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, comptime removal_type: RemovalType) bool {
const header = self.index_header orelse {
// Linear scan.
const key_hash = if (store_hash) key_ctx.hash(key) else {};
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
for (keys_array) |*item_key, i| {
const hash_match = if (store_hash) hashes_array[i] == key_hash else true;
if (hash_match and key_ctx.eql(key, item_key.*)) {
switch (removal_type) {
.swap => self.entries.swapRemove(i),
.ordered => self.entries.orderedRemove(i),
}
return true;
}
}
return false;
};
return switch (header.capacityIndexType()) {
.u8 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u8, removal_type),
.u16 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u16, removal_type),
.u32 => self.removeByKeyGeneric(key, key_ctx, ctx, header, u32, removal_type),
};
}
fn removeByKeyGeneric(self: *Self, key: anytype, key_ctx: anytype, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) bool {
const indexes = header.indexes(I);
const entry_index = self.removeFromIndexByKey(key, key_ctx, header, I, indexes) orelse return false;
self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type);
return true;
}
fn removeByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, comptime removal_type: RemovalType) void {
assert(entry_index < self.entries.len);
const header = self.index_header orelse {
switch (removal_type) {
.swap => self.entries.swapRemove(entry_index),
.ordered => self.entries.orderedRemove(entry_index),
}
return;
};
switch (header.capacityIndexType()) {
.u8 => self.removeByIndexGeneric(entry_index, ctx, header, u8, removal_type),
.u16 => self.removeByIndexGeneric(entry_index, ctx, header, u16, removal_type),
.u32 => self.removeByIndexGeneric(entry_index, ctx, header, u32, removal_type),
}
}
fn removeByIndexGeneric(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, comptime removal_type: RemovalType) void {
const indexes = header.indexes(I);
self.removeFromIndexByIndexGeneric(entry_index, ctx, header, I, indexes);
self.removeFromArrayAndUpdateIndex(entry_index, ctx, header, I, indexes, removal_type);
}
fn removeFromArrayAndUpdateIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I), comptime removal_type: RemovalType) void {
const last_index = self.entries.len-1; // overflow => remove from empty map
switch (removal_type) {
.swap => {
if (last_index != entry_index) {
// Because of the swap remove, now we need to update the index that was
// pointing to the last entry and is now pointing to this removed item slot.
self.updateEntryIndex(header, last_index, entry_index, ctx, I, indexes);
}
// updateEntryIndex reads from the old entry index,
// so it needs to run before removal.
self.entries.swapRemove(entry_index);
},
.ordered => {
var i: usize = entry_index;
while (i < last_index) : (i += 1) {
// Because of the ordered remove, everything from the entry index onwards has
// been shifted forward so we'll need to update the index entries.
self.updateEntryIndex(header, i + 1, i, ctx, I, indexes);
}
// updateEntryIndex reads from the old entry index,
// so it needs to run before removal.
self.entries.orderedRemove(entry_index);
},
}
}
fn updateEntryIndex(
@@ -809,116 +1307,188 @@ pub fn ArrayHashMapUnmanaged(
header: *IndexHeader,
old_entry_index: usize,
new_entry_index: usize,
ctx: ByIndexContext,
comptime I: type,
indexes: []Index(I),
) void {
const slot = self.getSlotByIndex(old_entry_index, ctx, header, I, indexes);
indexes[slot].entry_index = @intCast(I, new_entry_index);
}
fn removeFromIndexByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader) void {
switch (header.capacityIndexType()) {
.u8 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u8, header.indexes(u8)),
.u16 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u16, header.indexes(u16)),
.u32 => self.removeFromIndexByIndexGeneric(entry_index, ctx, header, u32, header.indexes(u32)),
}
}
fn removeFromIndexByIndexGeneric(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
const slot = self.getSlotByIndex(entry_index, ctx, header, I, indexes);
self.removeSlot(slot, header, I, indexes);
}
fn removeFromIndexByKey(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type, indexes: []Index(I)) ?usize {
const slot = self.getSlotByKey(key, ctx, header, I, indexes) orelse return null;
const removed_entry_index = indexes[slot].entry_index;
self.removeSlot(slot, header, I, indexes);
return removed_entry_index;
}
fn removeSlot(self: *Self, removed_slot: usize, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
const start_index = removed_slot +% 1;
const end_index = start_index +% indexes.len;
var last_slot = removed_slot;
var index: usize = start_index;
while (index != end_index) : (index +%= 1) {
const slot = header.constrainIndex(index);
const slot_data = indexes[slot];
if (slot_data.isEmpty() or slot_data.distance_from_start_index == 0) {
indexes[last_slot].setEmpty();
return;
}
indexes[last_slot] = .{
.entry_index = slot_data.entry_index,
.distance_from_start_index = slot_data.distance_from_start_index - 1,
};
last_slot = slot;
}
unreachable;
}
fn getSlotByIndex(self: *Self, entry_index: usize, ctx: ByIndexContext, header: *IndexHeader, comptime I: type, indexes: []Index(I)) usize {
const slice = self.entries.slice();
const h = if (store_hash) slice.items(.hash)[entry_index]
else checkedHash(ctx, slice.items(.key)[entry_index]);
const start_index = safeTruncate(usize, h);
const end_index = start_index +% indexes.len;
var index = start_index;
var distance_from_start_index: I = 0;
while (index != end_index) : ({
index +%= 1;
distance_from_start_index += 1;
}) {
const slot = header.constrainIndex(index);
const slot_data = indexes[slot];
// This is the fundamental property of the array hash map index. If this
// assert fails, it probably means that the entry was not in the index.
assert(!slot_data.isEmpty());
assert(slot_data.distance_from_start_index >= distance_from_start_index);
if (slot_data.entry_index == entry_index) {
return slot;
}
}
unreachable;
}
/// Must ensureCapacity before calling this.
fn getOrPutInternal(self: *Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type) GetOrPutResult {
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
const values_array = slice.items(.value);
const indexes = header.indexes(I);
const h = checkedHash(ctx, key);
const start_index = safeTruncate(usize, h);
const end_index = start_index +% indexes.len;
var index = start_index;
var distance_from_start_index: I = 0;
while (index != end_index) : ({
index +%= 1;
distance_from_start_index += 1;
}) {
var slot = header.constrainIndex(index);
var slot_data = indexes[slot];
// If the slot is empty, there can be no more items in this run.
// We didn't find a matching item, so this must be new.
// Put it in the empty slot.
if (slot_data.isEmpty()) {
const new_index = self.entries.addOneAssumeCapacity();
indexes[slot] = .{
.distance_from_start_index = distance_from_start_index,
.entry_index = @intCast(I, new_index),
};
// update the hash if applicable
if (store_hash) hashes_array.ptr[new_index] = h;
return .{
.found_existing = false,
.key_ptr = &keys_array.ptr[new_index],
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array.ptr[new_index],
.index = new_index,
};
}
const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true;
if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index])) {
return .{
.found_existing = true,
.key_ptr = &keys_array[slot_data.entry_index],
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array[slot_data.entry_index],
.index = slot_data.entry_index,
};
}
// If the entry is closer to its target than our current distance,
// the entry we are looking for does not exist. It would be in
// this slot instead if it was here. So stop looking, and switch
// to insert mode.
if (slot_data.distance_from_start_index < distance_from_start_index) {
// In this case, we did not find the item. We will put a new entry.
// However, we will use this index for the new entry, and move
// the previous index down the line, to keep the max distance_from_start_index
// as small as possible.
const new_index = self.entries.addOneAssumeCapacity();
if (store_hash) hashes_array.ptr[new_index] = h;
indexes[slot] = .{
.entry_index = @intCast(I, new_index),
.distance_from_start_index = distance_from_start_index,
};
distance_from_start_index = slot_data.distance_from_start_index;
var displaced_index = slot_data.entry_index;
// Find somewhere to put the index we replaced by shifting
// following indexes backwards.
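// This is the Robin Hood displacement step: the evicted
// (entry_index, distance) pair is carried forward and dropped into the
// first slot that is either empty or "richer" (closer to its own start
// index) than the pair we are holding.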
index +%= 1;
distance_from_start_index += 1;
while (index != end_index) : ({
index +%= 1;
distance_from_start_index += 1;
}) {
slot = header.constrainIndex(index);
slot_data = indexes[slot];
if (slot_data.isEmpty()) {
indexes[slot] = .{
.entry_index = displaced_index,
.distance_from_start_index = distance_from_start_index,
};
return .{
.found_existing = false,
.key_ptr = &keys_array.ptr[new_index],
// workaround for #6974
.value_ptr = if (@sizeOf(*V) == 0) undefined else &values_array.ptr[new_index],
.index = new_index,
};
}
if (slot_data.distance_from_start_index < distance_from_start_index) {
indexes[slot] = .{
.entry_index = displaced_index,
.distance_from_start_index = distance_from_start_index,
};
displaced_index = slot_data.entry_index;
distance_from_start_index = slot_data.distance_from_start_index;
}
}
@@ -927,61 +1497,69 @@ pub fn ArrayHashMapUnmanaged(
unreachable;
}
fn getSlotByKey(self: Self, key: anytype, ctx: anytype, header: *IndexHeader, comptime I: type, indexes: []Index(I)) ?usize {
const slice = self.entries.slice();
const hashes_array = if (store_hash) slice.items(.hash) else {};
const keys_array = slice.items(.key);
const h = checkedHash(ctx, key);
const start_index = safeTruncate(usize, h);
const end_index = start_index +% indexes.len;
var index = start_index;
var distance_from_start_index: I = 0;
while (index != end_index) : ({
index +%= 1;
distance_from_start_index += 1;
}) {
const slot = header.constrainIndex(index);
const slot_data = indexes[slot];
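// Robin Hood invariant: every resident slot is at least as far from its
// start index as any key probing past it. If this slot is empty or its
// resident is closer to home than our current probe distance, the key
// cannot appear later in the chain.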
if (slot_data.isEmpty() or slot_data.distance_from_start_index < distance_from_start_index)
return null;
const hash_match = if (store_hash) h == hashes_array[slot_data.entry_index] else true;
if (hash_match and checkedEql(ctx, key, keys_array[slot_data.entry_index]))
return slot;
}
unreachable;
}
fn insertAllEntriesIntoNewHeader(self: *Self, ctx: ByIndexContext, header: *IndexHeader) void {
switch (header.capacityIndexType()) {
.u8 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u8),
.u16 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u16),
.u32 => return self.insertAllEntriesIntoNewHeaderGeneric(ctx, header, u32),
}
}
fn insertAllEntriesIntoNewHeaderGeneric(self: *Self, ctx: ByIndexContext, header: *IndexHeader, comptime I: type) void {
const slice = self.entries.slice();
const items = if (store_hash) slice.items(.hash) else slice.items(.key);
const indexes = header.indexes(I);
entry_loop: for (items) |key, i| {
const h = if (store_hash) key else checkedHash(ctx, key);
const start_index = safeTruncate(usize, h);
const end_index = start_index +% indexes.len;
var index = start_index;
var entry_index = @intCast(I, i);
var distance_from_start_index: I = 0;
while (index != end_index) : ({
index +%= 1;
distance_from_start_index += 1;
}) {
const slot = header.constrainIndex(index);
const next_index = indexes[slot];
if (next_index.isEmpty()) {
indexes[slot] = .{
.distance_from_start_index = distance_from_start_index,
.entry_index = entry_index,
};
continue :entry_loop;
}
if (next_index.distance_from_start_index < distance_from_start_index) {
indexes[slot] = .{
.distance_from_start_index = distance_from_start_index,
.entry_index = entry_index,
};
distance_from_start_index = next_index.distance_from_start_index;
entry_index = next_index.entry_index;
@@ -990,98 +1568,255 @@ pub fn ArrayHashMapUnmanaged(
unreachable;
}
}
fn checkedHash(ctx: anytype, key: anytype) callconv(.Inline) u32 {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32);
// If you get a compile error on the next line, it means that
const hash = ctx.hash(key); // your generic hash function doesn't accept your key
if (@TypeOf(hash) != u32) {
@compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic hash function that returns the wrong type!\n"++
@typeName(u32)++" was expected, but found "++@typeName(@TypeOf(hash)));
}
return hash;
}
fn checkedEql(ctx: anytype, a: anytype, b: K) callconv(.Inline) bool {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32);
// If you get a compile error on the next line, it means that
const eql = ctx.eql(a, b); // your generic eql function doesn't accept (self, adapt key, K)
if (@TypeOf(eql) != bool) {
@compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic eql function that returns the wrong type!\n"++
@typeName(bool)++" was expected, but found "++@typeName(@TypeOf(eql)));
}
return eql;
}
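// For reference, a minimal user-defined context that satisfies the checks
// above might look like the following sketch (illustrative only; the name
// and hashing choices are assumptions, not part of this file):
//
//     const CaseInsensitiveContext = struct {
//         pub fn hash(self: @This(), key: []const u8) u32 {
//             _ = self;
//             var hasher = Wyhash.init(0);
//             for (key) |c| hasher.update(&[_]u8{std.ascii.toLower(c)});
//             return @truncate(u32, hasher.final());
//         }
//         pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
//             _ = self;
//             return std.ascii.eqlIgnoreCase(a, b);
//         }
//     };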
fn dumpState(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call dumpStateContext instead.");
self.dumpStateContext(keyFmt, valueFmt, undefined);
}
fn dumpStateContext(self: Self, comptime keyFmt: []const u8, comptime valueFmt: []const u8, ctx: Context) void {
const p = std.debug.print;
p("{s}:\n", .{@typeName(Self)});
const slice = self.entries.slice();
const hash_status = if (store_hash) "stored" else "computed";
p(" len={} capacity={} hashes {s}\n", .{slice.len, slice.capacity, hash_status});
var i: usize = 0;
const mask: u32 = if (self.index_header) |header| header.mask() else ~@as(u32, 0);
while (i < slice.len) : (i += 1) {
const hash = if (store_hash) slice.items(.hash)[i]
else checkedHash(ctx, slice.items(.key)[i]);
if (store_hash) {
p(
" [{}]: key="++keyFmt++" value="++valueFmt++" hash=0x{x} slot=[0x{x}]\n",
.{i, slice.items(.key)[i], slice.items(.value)[i], hash, hash & mask},
);
} else {
p(
" [{}]: key="++keyFmt++" value="++valueFmt++" slot=[0x{x}]\n",
.{i, slice.items(.key)[i], slice.items(.value)[i], hash & mask},
);
}
}
if (self.index_header) |header| {
p("\n", .{});
switch (header.capacityIndexType()) {
.u8 => self.dumpIndex(header, u8),
.u16 => self.dumpIndex(header, u16),
.u32 => self.dumpIndex(header, u32),
}
}
}
fn dumpIndex(self: Self, header: *IndexHeader, comptime I: type) void {
const p = std.debug.print;
p(" index len=0x{x} type={}\n", .{header.length(), header.capacityIndexType()});
const indexes = header.indexes(I);
if (indexes.len == 0) return;
var is_empty = false;
for (indexes) |idx, i| {
if (idx.isEmpty()) {
is_empty = true;
} else {
if (is_empty) {
is_empty = false;
p(" ...\n", .{});
}
p(" [0x{x}]: [{}] +{}\n", .{i, idx.entry_index, idx.distance_from_start_index});
}
}
if (is_empty) {
p(" ...\n", .{});
}
}
};
}
const CapacityIndexType = enum { u8, u16, u32 };
fn capacityIndexType(bit_index: u8) CapacityIndexType {
if (bit_index <= 8)
return .u8;
if (bit_index <= 16)
return .u16;
assert(bit_index <= 32);
return .u32;
}
fn capacityIndexSize(bit_index: u8) usize {
switch (capacityIndexType(bit_index)) {
.u8 => return @sizeOf(Index(u8)),
.u16 => return @sizeOf(Index(u16)),
.u32 => return @sizeOf(Index(u32)),
}
}
/// @truncate fails if the target type is wider than the
/// source type. This causes problems when one of the types
/// is usize, which may be larger or smaller than u32 on different
/// systems. This version of truncate is safe to use if either
/// parameter has dynamic size, and will perform widening conversion
/// when needed. Both arguments must have the same signedness.
fn safeTruncate(comptime T: type, val: anytype) T {
if (@bitSizeOf(T) >= @bitSizeOf(@TypeOf(val)))
return val;
return @truncate(T, val);
}
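// For example, `safeTruncate(u32, @as(u64, 0x1_0000_0001))` truncates to 1,
// while `safeTruncate(u64, @as(u32, 7))` takes the widening path and
// returns 7 without tripping @truncate's comptime size check.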
/// A single entry in the lookup acceleration structure. These structs
/// are found in an array after the IndexHeader. Hashes index into this
/// array, and linear probing is used for collisions.
fn Index(comptime I: type) type {
return extern struct {
const Self = @This();
/// The index of this entry in the backing store. If the index is
/// empty, this is empty_sentinel.
entry_index: I,
/// The distance between this slot and its ideal placement. This is
/// used to keep maximum scan length small. This value is undefined
/// if the index is empty.
distance_from_start_index: I,
/// The special entry_index value marking an empty slot.
const empty_sentinel = ~@as(I, 0);
/// A constant empty index
const empty = Self{
.entry_index = empty_sentinel,
.distance_from_start_index = undefined,
};
/// Checks if a slot is empty
fn isEmpty(idx: Self) bool {
return idx.entry_index == empty_sentinel;
}
/// Sets a slot to empty
fn setEmpty(idx: *Self) void {
idx.entry_index = empty_sentinel;
idx.distance_from_start_index = undefined;
}
};
}
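// Example: for I = u8 the sentinel is 0xff, and a u8 index is only chosen
// while bit_index <= 8, i.e. at most 256 slots holding at most
// index_capacities[8] == 153 entries, so a valid entry_index can never
// collide with the sentinel.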
/// the byte size of the index must fit in a usize. This is a power of two
/// length * the size of an Index(u32). The index is 8 bytes (3 bits repr)
/// and max_usize + 1 is not representable, so we need to subtract out 4 bits.
const max_representable_index_len = @bitSizeOf(usize) - 4;
const max_bit_index = math.min(32, max_representable_index_len);
const min_bit_index = 5;
const max_capacity = (1 << max_bit_index) - 1;
const index_capacities = blk: {
var caps: [max_bit_index + 1]u32 = undefined;
for (caps[0..max_bit_index]) |*item, i| {
item.* = (1<<i) * 3 / 5;
}
caps[max_bit_index] = max_capacity;
break :blk caps;
};
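// The table above pins the load factor at roughly 60%: for example,
// bit_index 5 gives 32 slots and a capacity of (1 << 5) * 3 / 5 == 19
// entries, and bit_index 6 gives 64 slots for 38 entries.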
/// This struct is trailed by two arrays of length indexes_len
/// of integers, whose integer size is determined by indexes_len.
/// These arrays are indexed by constrainIndex(hash). The
/// entryIndexes array contains the index in the dense backing store
/// where the entry's data can be found. Entries which are not in
/// use have their index value set to emptySentinel(I).
/// The entryDistances array stores the distance between an entry
/// and its ideal hash bucket. This is used when adding elements
/// to balance the maximum scan length.
const IndexHeader = struct {
/// This field tracks the total number of items in the arrays following
/// this header. It is the bit index of the power of two number of indices.
/// This value is between min_bit_index and max_bit_index, inclusive.
bit_index: u8 align(@alignOf(u32)),
/// Map from an incrementing index to an index slot in the attached arrays.
fn constrainIndex(header: IndexHeader, i: usize) usize {
// This is an optimization for modulo of power of two integers;
// it requires `indexes_len` to always be a power of two.
return @intCast(usize, i & header.mask());
}
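// e.g. with bit_index 5, length() is 32 and mask() is 31, so
// constrainIndex(37) == 37 & 31 == 5, matching 37 % 32 without a division.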
/// Returns the attached array of indexes. I must match the type
/// returned by capacityIndexType.
fn indexes(header: *IndexHeader, comptime I: type) []Index(I) {
const start_ptr = @ptrCast([*]Index(I), @ptrCast([*]u8, header) + @sizeOf(IndexHeader));
return start_ptr[0..header.length()];
}
/// Returns the type used for the index arrays.
fn capacityIndexType(header: IndexHeader) CapacityIndexType {
return hash_map.capacityIndexType(header.bit_index);
}
fn capacity(self: IndexHeader) u32 {
return index_capacities[self.bit_index];
}
fn length(self: IndexHeader) usize {
return @as(usize, 1) << @intCast(math.Log2Int(usize), self.bit_index);
}
fn mask(self: IndexHeader) u32 {
return @intCast(u32, self.length() - 1);
}
fn findBitIndex(desired_capacity: usize) !u8 {
if (desired_capacity > max_capacity) return error.OutOfMemory;
var new_bit_index = @intCast(u8, std.math.log2_int_ceil(usize, desired_capacity));
if (desired_capacity > index_capacities[new_bit_index]) new_bit_index += 1;
if (new_bit_index < min_bit_index) new_bit_index = min_bit_index;
assert(desired_capacity <= index_capacities[new_bit_index]);
return new_bit_index;
}
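// Worked example: findBitIndex(20) starts from log2_int_ceil(20) == 5, sees
// index_capacities[5] == 19 < 20, bumps to bit_index 6, and ends up with
// 64 slots and room for 38 entries.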
/// Allocates an index header, and fills the entryIndexes array with empty.
/// The distance array contents are undefined.
fn alloc(allocator: *Allocator, new_bit_index: u8) !*IndexHeader {
const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
const index_size = hash_map.capacityIndexSize(new_bit_index);
const nbytes = @sizeOf(IndexHeader) + index_size * len;
const bytes = try allocator.allocAdvanced(u8, @alignOf(IndexHeader), nbytes, .exact);
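// Writing 0xff bytes over the trailing arrays makes every Index slot's
// entry_index read as empty_sentinel, i.e. the fresh table starts empty.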
@memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader));
const result = @ptrCast(*IndexHeader, bytes.ptr);
result.* = .{
.bit_index = new_bit_index,
};
return result;
}
/// Releases the memory for a header and its associated arrays.
fn free(header: *IndexHeader, allocator: *Allocator) void {
const index_size = hash_map.capacityIndexSize(header.bit_index);
const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];
allocator.free(slice);
}
// Verify that the header has sufficient alignment to produce aligned arrays.
comptime {
if (@alignOf(u32) > @alignOf(IndexHeader))
@compileError("IndexHeader must have a larger alignment than its indexes!");
}
};
test "basic hash map usage" {
@@ -1099,31 +1834,32 @@ test "basic hash map usage" {
const gop1 = try map.getOrPut(5);
try testing.expect(gop1.found_existing == true);
try testing.expect(gop1.value_ptr.* == 55);
try testing.expect(gop1.index == 4);
gop1.value_ptr.* = 77;
try testing.expect(map.getEntry(5).?.value_ptr.* == 77);
const gop2 = try map.getOrPut(99);
try testing.expect(gop2.found_existing == false);
try testing.expect(gop2.index == 5);
gop2.value_ptr.* = 42;
try testing.expect(map.getEntry(99).?.value_ptr.* == 42);
const gop3 = try map.getOrPutValue(5, 5);
try testing.expect(gop3.value_ptr.* == 77);
const gop4 = try map.getOrPutValue(100, 41);
try testing.expect(gop4.value_ptr.* == 41);
try testing.expect(map.contains(2));
try testing.expect(map.getEntry(2).?.value_ptr.* == 22);
try testing.expect(map.get(2).? == 22);
const rmv1 = map.fetchSwapRemove(2);
try testing.expect(rmv1.?.key == 2);
try testing.expect(rmv1.?.value == 22);
try testing.expect(map.fetchSwapRemove(2) == null);
try testing.expect(map.swapRemove(2) == false);
try testing.expect(map.getEntry(2) == null);
try testing.expect(map.get(2) == null);
@@ -1131,22 +1867,23 @@ test "basic hash map usage" {
try testing.expect(map.getIndex(100).? == 1);
const gop5 = try map.getOrPut(5);
try testing.expect(gop5.found_existing == true);
try testing.expect(gop5.value_ptr.* == 77);
try testing.expect(gop5.index == 4);
// Whereas, if we do an `orderedRemove`, it should move the index forward one spot.
const rmv2 = map.fetchOrderedRemove(100);
try testing.expect(rmv2.?.key == 100);
try testing.expect(rmv2.?.value == 41);
try testing.expect(map.fetchOrderedRemove(100) == null);
try testing.expect(map.orderedRemove(100) == false);
try testing.expect(map.getEntry(100) == null);
try testing.expect(map.get(100) == null);
const gop6 = try map.getOrPut(5);
try testing.expect(gop6.found_existing == true);
try testing.expect(gop6.value_ptr.* == 77);
try testing.expect(gop6.index == 3);
try testing.expect(map.swapRemove(3));
}
test "iterator hash map" {
@@ -1154,7 +1891,7 @@ test "iterator hash map" {
defer reset_map.deinit();
// test ensureTotalCapacity with a 0 parameter
try reset_map.ensureTotalCapacity(0);
try reset_map.putNoClobber(0, 11);
try reset_map.putNoClobber(1, 22);
@@ -1178,7 +1915,7 @@ test "iterator hash map" {
var count: usize = 0;
while (it.next()) |entry| : (count += 1) {
buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*;
}
try testing.expect(count == 3);
try testing.expect(it.next() == null);
@@ -1190,7 +1927,7 @@ test "iterator hash map" {
it.reset();
count = 0;
while (it.next()) |entry| {
buffer[@intCast(usize, entry.key_ptr.*)] = entry.value_ptr.*;
count += 1;
if (count >= 2) break;
}
@@ -1201,15 +1938,15 @@ test "iterator hash map" {
it.reset();
var entry = it.next().?;
try testing.expect(entry.key_ptr.* == first_entry.key_ptr.*);
try testing.expect(entry.value_ptr.* == first_entry.value_ptr.*);
}
test "ensure capacity" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
try map.ensureTotalCapacity(20);
const initial_capacity = map.capacity();
try testing.expect(initial_capacity >= 20);
var i: i32 = 0;
@@ -1220,6 +1957,59 @@ test "ensure capacity" {
try testing.expect(initial_capacity == map.capacity());
}
test "big map" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
var i: i32 = 0;
while (i < 8) : (i += 1) {
try map.put(i, i + 10);
}
i = 0;
while (i < 8) : (i += 1) {
try testing.expectEqual(@as(?i32, i + 10), map.get(i));
}
while (i < 16) : (i += 1) {
try testing.expectEqual(@as(?i32, null), map.get(i));
}
i = 4;
while (i < 12) : (i += 1) {
try map.put(i, i + 12);
}
i = 0;
while (i < 4) : (i += 1) {
try testing.expectEqual(@as(?i32, i + 10), map.get(i));
}
while (i < 12) : (i += 1) {
try testing.expectEqual(@as(?i32, i + 12), map.get(i));
}
while (i < 16) : (i += 1) {
try testing.expectEqual(@as(?i32, null), map.get(i));
}
i = 0;
while (i < 4) : (i += 1) {
try testing.expect(map.orderedRemove(i));
}
while (i < 8) : (i += 1) {
try testing.expect(map.swapRemove(i));
}
i = 0;
while (i < 8) : (i += 1) {
try testing.expectEqual(@as(?i32, null), map.get(i));
}
while (i < 12) : (i += 1) {
try testing.expectEqual(@as(?i32, i + 12), map.get(i));
}
while (i < 16) : (i += 1) {
try testing.expectEqual(@as(?i32, null), map.get(i));
}
}
test "clone" {
var original = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer original.deinit();
@@ -1235,7 +2025,14 @@ test "clone" {
i = 0;
while (i < 10) : (i += 1) {
try testing.expect(original.get(i).? == i * 10);
try testing.expect(copy.get(i).? == i * 10);
try testing.expect(original.getPtr(i).? != copy.getPtr(i).?);
}
while (i < 20) : (i += 1) {
try testing.expect(original.get(i) == null);
try testing.expect(copy.get(i) == null);
}
}
@@ -1261,7 +2058,7 @@ test "shrink" {
const gop = try map.getOrPut(i);
if (i < 17) {
try testing.expect(gop.found_existing == true);
try testing.expect(gop.value_ptr.* == i * 10);
} else try testing.expect(gop.found_existing == false);
}
@@ -1274,7 +2071,7 @@ test "shrink" {
const gop = try map.getOrPut(i);
if (i < 15) {
try testing.expect(gop.found_existing == true);
try testing.expect(gop.value_ptr.* == i * 10);
} else try testing.expect(gop.found_existing == false);
}
}
@@ -1298,7 +2095,7 @@ test "pop" {
}
test "reIndex" {
var map = ArrayHashMap(i32, i32, AutoContext(i32), true).init(std.testing.allocator);
defer map.deinit();
// Populate via the API.
@@ -1312,13 +2109,13 @@ test "reIndex" {
// Now write to the underlying array list directly.
const num_unindexed_entries = 20;
const hash = getAutoHashFn(i32, void);
var al = &map.unmanaged.entries;
while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) {
try al.append(std.testing.allocator, .{
.key = i,
.value = i * 10,
.hash = hash({}, i),
});
}
@@ -1328,36 +2125,7 @@ test "reIndex" {
while (i < num_indexed_entries + num_unindexed_entries) : (i += 1) {
const gop = try map.getOrPut(i);
try testing.expect(gop.found_existing == true);
try testing.expect(gop.value_ptr.* == i * 10);
try testing.expect(gop.index == i);
}
}
@@ -1365,34 +2133,52 @@ test "fromOwnedArrayList" {
test "auto store_hash" {
const HasCheapEql = AutoArrayHashMap(i32, i32);
const HasExpensiveEql = AutoArrayHashMap([32]i32, i32);
try testing.expect(meta.fieldInfo(HasCheapEql.Data, .hash).field_type == void);
try testing.expect(meta.fieldInfo(HasExpensiveEql.Data, .hash).field_type != void);
const HasCheapEqlUn = AutoArrayHashMapUnmanaged(i32, i32);
const HasExpensiveEqlUn = AutoArrayHashMapUnmanaged([32]i32, i32);
try testing.expect(meta.fieldInfo(HasCheapEqlUn.Data, .hash).field_type == void);
try testing.expect(meta.fieldInfo(HasExpensiveEqlUn.Data, .hash).field_type != void);
}
test "compile everything" {
std.testing.refAllDecls(AutoArrayHashMap(i32, i32));
std.testing.refAllDecls(StringArrayHashMap([]const u8));
std.testing.refAllDecls(AutoArrayHashMap(i32, void));
std.testing.refAllDecls(StringArrayHashMap(u0));
std.testing.refAllDecls(AutoArrayHashMapUnmanaged(i32, i32));
std.testing.refAllDecls(StringArrayHashMapUnmanaged([]const u8));
std.testing.refAllDecls(AutoArrayHashMapUnmanaged(i32, void));
std.testing.refAllDecls(StringArrayHashMapUnmanaged(u0));
}
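// An illustrative sketch (not part of this change set) exercising the
// renamed removal API and the new getPtr on the managed wrapper:
test "getPtr and fetchSwapRemove sketch" {
var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
try map.put(1, 10);
// getPtr returns a mutable pointer into the value array.
map.getPtr(1).?.* += 1;
try testing.expect(map.get(1).? == 11);
// fetchSwapRemove returns the removed {K, V} pair...
const kv = map.fetchSwapRemove(1).?;
try testing.expect(kv.key == 1 and kv.value == 11);
// ...while swapRemove only reports whether a key was removed.
try testing.expect(!map.swapRemove(1));
}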
pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
return struct {
fn hash(ctx: Context, key: K) u32 {
return getAutoHashFn(usize, void)({}, @ptrToInt(key));
}
}.hash;
}
pub fn getTrivialEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
return struct {
fn eql(ctx: Context, a: K, b: K) bool {
return a == b;
}
}.eql;
}
pub fn AutoContext(comptime K: type) type {
return struct {
pub const hash = getAutoHashFn(K, @This());
pub const eql = getAutoEqlFn(K, @This());
};
}
pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
return struct {
fn hash(ctx: Context, key: K) u32 {
if (comptime trait.hasUniqueRepresentation(K)) {
return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key)));
} else {
@@ -1404,9 +2190,9 @@ pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
}.hash;
}
pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
return struct {
fn eql(ctx: Context, a: K, b: K) bool {
return meta.eql(a, b);
}
}.eql;
@@ -1430,9 +2216,9 @@ pub fn autoEqlIsCheap(comptime K: type) bool {
};
}
pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime strategy: std.hash.Strategy) (fn (Context, K) u32) {
return struct {
fn hash(ctx: Context, key: K) u32 {
var hasher = Wyhash.init(0);
std.hash.autoHashStrat(&hasher, key, strategy);
return @truncate(u32, hasher.final());
+42 -25
@@ -16,65 +16,82 @@ pub const BufMap = struct {
const BufMapHashMap = StringHashMap([]const u8);
/// Create a BufMap backed by a specific allocator.
/// That allocator will be used for both backing allocations
/// and string deduplication.
pub fn init(allocator: *Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
/// Free the backing storage of the map, as well as all
/// of the stored keys and values.
pub fn deinit(self: *BufMap) void {
var it = self.hash_map.iterator();
while (it.next()) |entry| {
self.free(entry.key_ptr.*);
self.free(entry.value_ptr.*);
}
self.hash_map.deinit();
}
/// Same as `put` but the key and value become owned by the BufMap rather
/// than being copied.
/// If `putMove` fails, the ownership of key and value does not transfer.
pub fn putMove(self: *BufMap, key: []u8, value: []u8) !void {
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
self.free(get_or_put.key_ptr.*);
self.free(get_or_put.value_ptr.*);
get_or_put.key_ptr.* = key;
}
get_or_put.value_ptr.* = value;
}
/// `key` and `value` are copied into the BufMap.
pub fn put(self: *BufMap, key: []const u8, value: []const u8) !void {
const value_copy = try self.copy(value);
errdefer self.free(value_copy);
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
self.free(get_or_put.value_ptr.*);
} else {
get_or_put.key_ptr.* = self.copy(key) catch |err| {
_ = self.hash_map.remove(key);
return err;
};
}
get_or_put.value_ptr.* = value_copy;
}
/// Find the address of the value associated with a key.
/// The returned pointer is invalidated if the map resizes.
pub fn getPtr(self: BufMap, key: []const u8) ?*[]const u8 {
return self.hash_map.getPtr(key);
}
/// Return the map's copy of the value associated with
/// a key. The returned string is invalidated if this
/// key is removed from the map.
pub fn get(self: BufMap, key: []const u8) ?[]const u8 {
return self.hash_map.get(key);
}
/// Removes the item from the map and frees its value.
/// This invalidates the value returned by get() for this key.
pub fn remove(self: *BufMap, key: []const u8) void {
const kv = self.hash_map.fetchRemove(key) orelse return;
self.free(kv.key);
self.free(kv.value);
}
/// Returns the number of KV pairs stored in the map.
pub fn count(self: BufMap) usize {
return self.hash_map.count();
}
/// Returns an iterator over entries in the map.
pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator {
return self.hash_map.iterator();
}
@@ -93,21 +110,21 @@ test "BufMap" {
var bufmap = BufMap.init(allocator);
defer bufmap.deinit();
try bufmap.set("x", "1");
try bufmap.put("x", "1");
try testing.expect(mem.eql(u8, bufmap.get("x").?, "1"));
try testing.expect(1 == bufmap.count());
try bufmap.set("x", "2");
try bufmap.put("x", "2");
try testing.expect(mem.eql(u8, bufmap.get("x").?, "2"));
try testing.expect(1 == bufmap.count());
try bufmap.set("x", "3");
try bufmap.put("x", "3");
try testing.expect(mem.eql(u8, bufmap.get("x").?, "3"));
try testing.expect(1 == bufmap.count());
bufmap.delete("x");
bufmap.remove("x");
try testing.expect(0 == bufmap.count());
try bufmap.putMove(try allocator.dupe(u8, "k"), try allocator.dupe(u8, "v1"));
try bufmap.putMove(try allocator.dupe(u8, "k"), try allocator.dupe(u8, "v2"));
}
+39 -20
@@ -9,50 +9,69 @@ const mem = @import("mem.zig");
const Allocator = mem.Allocator;
const testing = std.testing;
/// A BufSet is a set of strings. The BufSet duplicates
/// strings internally, and never takes ownership of strings
/// which are passed to it.
pub const BufSet = struct {
hash_map: BufSetHashMap,
const BufSetHashMap = StringHashMap(void);
pub const Iterator = BufSetHashMap.KeyIterator;
/// Create a BufSet using an allocator. The allocator will
/// be used internally for both backing allocations and
/// string duplication.
pub fn init(a: *Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
/// Free a BufSet along with all stored keys.
pub fn deinit(self: *BufSet) void {
var it = self.hash_map.keyIterator();
while (it.next()) |key_ptr| {
self.free(key_ptr.*);
}
self.hash_map.deinit();
self.* = undefined;
}
/// Insert an item into the BufSet. The item will be
/// copied, so the caller may delete or reuse the
/// passed string immediately.
pub fn insert(self: *BufSet, value: []const u8) !void {
const gop = try self.hash_map.getOrPut(value);
if (!gop.found_existing) {
gop.key_ptr.* = self.copy(value) catch |err| {
_ = self.hash_map.remove(value);
return err;
};
}
}
/// Check if the set contains an item matching the passed string
pub fn contains(self: BufSet, value: []const u8) bool {
return self.hash_map.contains(value);
}
/// Remove an item from the set.
pub fn remove(self: *BufSet, value: []const u8) void {
const kv = self.hash_map.fetchRemove(value) orelse return;
self.free(kv.key);
}
/// Returns the number of items stored in the set
pub fn count(self: *const BufSet) usize {
return self.hash_map.count();
}
/// Returns an iterator over the items stored in the set.
/// Iteration order is arbitrary.
pub fn iterator(self: *const BufSet) Iterator {
return self.hash_map.keyIterator();
}
/// Get the allocator used by this set
pub fn allocator(self: *const BufSet) *Allocator {
return self.hash_map.allocator;
}
@@ -72,12 +91,12 @@ test "BufSet" {
var bufset = BufSet.init(std.testing.allocator);
defer bufset.deinit();
try bufset.put("x");
try bufset.insert("x");
try testing.expect(bufset.count() == 1);
bufset.delete("x");
bufset.remove("x");
try testing.expect(bufset.count() == 0);
try bufset.put("x");
try bufset.put("y");
try bufset.put("z");
try bufset.insert("x");
try bufset.insert("y");
try bufset.insert("z");
}
+21 -21
@@ -504,10 +504,10 @@ pub const Builder = struct {
}
self.available_options_list.append(available_option) catch unreachable;
const option_ptr = self.user_input_options.getPtr(name) orelse return null;
option_ptr.used = true;
switch (type_id) {
.Bool => switch (option_ptr.value) {
.Flag => return true,
.Scalar => |s| {
if (mem.eql(u8, s, "true")) {
@@ -526,7 +526,7 @@ pub const Builder = struct {
return null;
},
},
.Int => switch (option_ptr.value) {
.Flag => {
warn("Expected -D{s} to be an integer, but received a boolean.\n\n", .{name});
self.markInvalidUserInput();
@@ -553,7 +553,7 @@ pub const Builder = struct {
return null;
},
},
.Float => switch (option_ptr.value) {
.Flag => {
warn("Expected -D{s} to be a float, but received a boolean.\n\n", .{name});
self.markInvalidUserInput();
@@ -573,7 +573,7 @@ pub const Builder = struct {
return null;
},
},
.Enum => switch (option_ptr.value) {
.Flag => {
warn("Expected -D{s} to be a string, but received a boolean.\n\n", .{name});
self.markInvalidUserInput();
@@ -594,7 +594,7 @@ pub const Builder = struct {
return null;
},
},
.String => switch (option_ptr.value) {
.Flag => {
warn("Expected -D{s} to be a string, but received a boolean.\n\n", .{name});
self.markInvalidUserInput();
@@ -607,7 +607,7 @@ pub const Builder = struct {
},
.Scalar => |s| return s,
},
.List => switch (option_ptr.value) {
.Flag => {
warn("Expected -D{s} to be a list, but received a boolean.\n\n", .{name});
self.markInvalidUserInput();
@@ -769,7 +769,7 @@ pub const Builder = struct {
const value = self.dupe(value_raw);
const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
gop.value_ptr.* = UserInputOption{
.name = name,
.value = UserValue{ .Scalar = value },
.used = false,
@@ -778,7 +778,7 @@ pub const Builder = struct {
}
// option already exists
switch (gop.value_ptr.value) {
UserValue.Scalar => |s| {
// turn it into a list
var list = ArrayList([]const u8).init(self.allocator);
@@ -811,7 +811,7 @@ pub const Builder = struct {
const name = self.dupe(name_raw);
const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
gop.value_ptr.* = UserInputOption{
.name = name,
.value = UserValue{ .Flag = {} },
.used = false,
@@ -820,7 +820,7 @@ pub const Builder = struct {
}
// option already exists
switch (gop.value_ptr.value) {
UserValue.Scalar => |s| {
warn("Flag '-D{s}' conflicts with option '-D{s}={s}'.\n", .{ name, name, s });
return true;
@@ -866,10 +866,9 @@ pub const Builder = struct {
pub fn validateUserInputDidItFail(self: *Builder) bool {
// make sure all args are used
var it = self.user_input_options.iterator();
while (it.next()) |entry| {
if (!entry.value_ptr.used) {
warn("Invalid option: -D{s}\n\n", .{entry.key_ptr.*});
self.markInvalidUserInput();
}
}
@@ -1653,7 +1652,8 @@ pub const LibExeObjStep = struct {
pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void {
assert(self.target.isDarwin());
// Note: No need to dupe because frameworks dupes internally.
self.frameworks.insert(framework_name) catch unreachable;
}
/// Returns whether the library, executable, or object depends on a particular system library.
@@ -2155,8 +2155,8 @@ pub const LibExeObjStep = struct {
// Inherit dependencies on darwin frameworks
if (self.target.isDarwin() and !other.isDynamicLibrary()) {
var it = other.frameworks.iterator();
while (it.next()) |framework| {
self.frameworks.insert(framework.*) catch unreachable;
}
}
}
@@ -2591,9 +2591,9 @@ pub const LibExeObjStep = struct {
}
var it = self.frameworks.iterator();
while (it.next()) |framework| {
zig_args.append("-framework") catch unreachable;
zig_args.append(framework.*) catch unreachable;
}
}
+4 -6
@@ -117,9 +117,9 @@ pub const RunStep = struct {
if (prev_path) |pp| {
const new_path = self.builder.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
env_map.put(key, new_path) catch unreachable;
} else {
env_map.put(key, self.builder.dupePath(search_path)) catch unreachable;
}
}
@@ -134,10 +134,8 @@ pub const RunStep = struct {
pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8) void {
const env_map = self.getEnvMap();
// Note: no need to dupe these strings because BufMap does it internally.
env_map.put(key, value) catch unreachable;
}
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
+12 -12
@@ -955,7 +955,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
while (it.next()) |pair| {
// +1 for '='
// +1 for null byte
max_chars_needed += pair.key_ptr.len + pair.value_ptr.len + 2;
}
break :x max_chars_needed;
};
@@ -965,10 +965,10 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
var it = env_map.iterator();
var i: usize = 0;
while (it.next()) |pair| {
i += try unicode.utf8ToUtf16Le(result[i..], pair.key_ptr.*);
result[i] = '=';
i += 1;
i += try unicode.utf8ToUtf16Le(result[i..], pair.value_ptr.*);
result[i] = 0;
i += 1;
}
@@ -990,10 +990,10 @@ pub fn createNullDelimitedEnvMap(arena: *mem.Allocator, env_map: *const std.BufM
var it = env_map.iterator();
var i: usize = 0;
while (it.next()) |pair| : (i += 1) {
const env_buf = try arena.allocSentinel(u8, pair.key.len + pair.value.len + 1, 0);
mem.copy(u8, env_buf, pair.key);
env_buf[pair.key.len] = '=';
mem.copy(u8, env_buf[pair.key.len + 1 ..], pair.value);
const env_buf = try arena.allocSentinel(u8, pair.key_ptr.len + pair.value_ptr.len + 1, 0);
mem.copy(u8, env_buf, pair.key_ptr.*);
env_buf[pair.key_ptr.len] = '=';
mem.copy(u8, env_buf[pair.key_ptr.len + 1 ..], pair.value_ptr.*);
envp_buf[i] = env_buf.ptr;
}
assert(i == envp_count);
@@ -1007,11 +1007,11 @@ test "createNullDelimitedEnvMap" {
var envmap = BufMap.init(allocator);
defer envmap.deinit();
try envmap.set("HOME", "/home/ifreund");
try envmap.set("WAYLAND_DISPLAY", "wayland-1");
try envmap.set("DISPLAY", ":1");
try envmap.set("DEBUGINFOD_URLS", " ");
try envmap.set("XCURSOR_SIZE", "24");
try envmap.put("HOME", "/home/ifreund");
try envmap.put("WAYLAND_DISPLAY", "wayland-1");
try envmap.put("DISPLAY", ":1");
try envmap.put("DEBUGINFOD_URLS", " ");
try envmap.put("XCURSOR_SIZE", "24");
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
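The renamed BufMap.put (formerly set) copies both arguments internally, which is why the explicit dupe calls above could be dropped. A minimal sketch under that assumption:

const std = @import("std");

test "BufMap.put copies its arguments" {
    var env = std.BufMap.init(std.testing.allocator);
    defer env.deinit();

    var stack_buf: [16]u8 = undefined;
    const value = try std.fmt.bufPrint(&stack_buf, "{d}", .{24});
    // put() dupes both key and value, so the stack buffer
    // may be reused or go out of scope afterwards.
    try env.put("XCURSOR_SIZE", value);
    stack_buf[0] = '!';

    std.debug.assert(std.mem.eql(u8, "24", env.get("XCURSOR_SIZE").?));
}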
+58 -56
View File
@@ -165,11 +165,13 @@ pub fn Watch(comptime V: type) type {
.macos, .freebsd, .netbsd, .dragonfly, .openbsd => {
var it = self.os_data.file_table.iterator();
while (it.next()) |entry| {
entry.value.cancelled = true;
const key = entry.key_ptr.*;
const value = entry.value_ptr.*;
value.cancelled = true;
// @TODO Close the fd here?
await entry.value.putter_frame;
self.allocator.free(entry.key);
self.allocator.destroy(entry.value);
await value.putter_frame;
self.allocator.free(key);
self.allocator.destroy(value);
}
},
.linux => {
@@ -177,9 +179,9 @@ pub fn Watch(comptime V: type) type {
{
// Remove all directory watches linuxEventPutter will take care of
// cleaning up the memory and closing the inotify fd.
var dir_it = self.os_data.wd_table.iterator();
while (dir_it.next()) |wd_entry| {
const rc = os.linux.inotify_rm_watch(self.os_data.inotify_fd, wd_entry.key);
var dir_it = self.os_data.wd_table.keyIterator();
while (dir_it.next()) |wd_key| {
const rc = os.linux.inotify_rm_watch(self.os_data.inotify_fd, wd_key.*);
// Errno can only be EBADF, EINVAL if either the inotify fs or the wd are invalid
std.debug.assert(rc == 0);
}
@@ -202,13 +204,13 @@ pub fn Watch(comptime V: type) type {
await dir_entry.value.putter_frame;
}
self.allocator.free(dir_entry.key);
var file_it = dir_entry.value.file_table.iterator();
self.allocator.free(dir_entry.key_ptr.*);
var file_it = dir_entry.value.file_table.keyIterator();
while (file_it.next()) |file_entry| {
self.allocator.free(file_entry.key);
self.allocator.free(file_entry.*);
}
dir_entry.value.file_table.deinit(self.allocator);
self.allocator.destroy(dir_entry.value);
self.allocator.destroy(dir_entry.value_ptr.*);
}
self.os_data.dir_table.deinit(self.allocator);
},
@@ -236,18 +238,18 @@ pub fn Watch(comptime V: type) type {
defer held.release();
const gop = try self.os_data.file_table.getOrPut(self.allocator, realpath);
errdefer self.os_data.file_table.removeAssertDiscard(realpath);
errdefer assert(self.os_data.file_table.remove(realpath));
if (gop.found_existing) {
const prev_value = gop.entry.value.value;
gop.entry.value.value = value;
const prev_value = gop.value_ptr.value;
gop.value_ptr.value = value;
return prev_value;
}
gop.entry.key = try self.allocator.dupe(u8, realpath);
errdefer self.allocator.free(gop.entry.key);
gop.entry.value = try self.allocator.create(OsData.Put);
errdefer self.allocator.destroy(gop.entry.value);
gop.entry.value.* = .{
gop.key_ptr.* = try self.allocator.dupe(u8, realpath);
errdefer self.allocator.free(gop.key_ptr.*);
gop.value_ptr.* = try self.allocator.create(OsData.Put);
errdefer self.allocator.destroy(gop.value_ptr.*);
gop.value_ptr.* = .{
.putter_frame = undefined,
.value = value,
};
@@ -255,7 +257,7 @@ pub fn Watch(comptime V: type) type {
// @TODO Can I close this fd and get an error from bsdWaitKev?
const flags = if (comptime std.Target.current.isDarwin()) os.O_SYMLINK | os.O_EVTONLY else 0;
const fd = try os.open(realpath, flags, 0);
gop.entry.value.putter_frame = async self.kqPutEvents(fd, gop.entry.key, gop.entry.value);
gop.value_ptr.putter_frame = async self.kqPutEvents(fd, gop.key_ptr.*, gop.value_ptr.*);
return null;
}
@@ -345,24 +347,24 @@ pub fn Watch(comptime V: type) type {
defer held.release();
const gop = try self.os_data.wd_table.getOrPut(self.allocator, wd);
errdefer self.os_data.wd_table.removeAssertDiscard(wd);
errdefer assert(self.os_data.wd_table.remove(wd));
if (!gop.found_existing) {
gop.entry.value = OsData.Dir{
gop.value_ptr.* = OsData.Dir{
.dirname = try self.allocator.dupe(u8, dirname),
.file_table = OsData.FileTable.init(self.allocator),
};
}
const dir = &gop.entry.value;
const dir = gop.value_ptr;
const file_table_gop = try dir.file_table.getOrPut(self.allocator, basename);
errdefer dir.file_table.removeAssertDiscard(basename);
errdefer assert(dir.file_table.remove(basename));
if (file_table_gop.found_existing) {
const prev_value = file_table_gop.entry.value;
file_table_gop.entry.value = value;
const prev_value = file_table_gop.value_ptr.*;
file_table_gop.value_ptr.* = value;
return prev_value;
} else {
file_table_gop.entry.key = try self.allocator.dupe(u8, basename);
file_table_gop.entry.value = value;
file_table_gop.key_ptr.* = try self.allocator.dupe(u8, basename);
file_table_gop.value_ptr.* = value;
return null;
}
}
@@ -383,19 +385,19 @@ pub fn Watch(comptime V: type) type {
defer held.release();
const gop = try self.os_data.dir_table.getOrPut(self.allocator, dirname);
errdefer self.os_data.dir_table.removeAssertDiscard(dirname);
errdefer assert(self.os_data.dir_table.remove(dirname));
if (gop.found_existing) {
const dir = gop.entry.value;
const dir = gop.value_ptr.*;
const file_gop = try dir.file_table.getOrPut(self.allocator, basename);
errdefer dir.file_table.removeAssertDiscard(basename);
errdefer assert(dir.file_table.remove(basename));
if (file_gop.found_existing) {
const prev_value = file_gop.entry.value;
file_gop.entry.value = value;
const prev_value = file_gop.value_ptr.*;
file_gop.value_ptr.* = value;
return prev_value;
} else {
file_gop.entry.value = value;
file_gop.entry.key = try self.allocator.dupe(u8, basename);
file_gop.value_ptr.* = value;
file_gop.key_ptr.* = try self.allocator.dupe(u8, basename);
return null;
}
} else {
@@ -411,17 +413,17 @@ pub fn Watch(comptime V: type) type {
const dir = try self.allocator.create(OsData.Dir);
errdefer self.allocator.destroy(dir);
gop.entry.key = try self.allocator.dupe(u8, dirname);
errdefer self.allocator.free(gop.entry.key);
gop.key_ptr.* = try self.allocator.dupe(u8, dirname);
errdefer self.allocator.free(gop.key_ptr.*);
dir.* = OsData.Dir{
.file_table = OsData.FileTable.init(self.allocator),
.putter_frame = undefined,
.dir_handle = dir_handle,
};
gop.entry.value = dir;
gop.value_ptr.* = dir;
try dir.file_table.put(self.allocator, try self.allocator.dupe(u8, basename), value);
dir.putter_frame = async self.windowsDirReader(dir, gop.entry.key);
dir.putter_frame = async self.windowsDirReader(dir, gop.key_ptr.*);
return null;
}
}
@@ -501,9 +503,9 @@ pub fn Watch(comptime V: type) type {
if (dir.file_table.getEntry(basename)) |entry| {
self.channel.put(Event{
.id = id,
.data = entry.value,
.data = entry.value_ptr.*,
.dirname = dirname,
.basename = entry.key,
.basename = entry.key_ptr.*,
});
}
}
@@ -525,7 +527,7 @@ pub fn Watch(comptime V: type) type {
defer held.release();
const dir = self.os_data.wd_table.get(dirname) orelse return null;
if (dir.file_table.remove(basename)) |file_entry| {
if (dir.file_table.fetchRemove(basename)) |file_entry| {
self.allocator.free(file_entry.key);
return file_entry.value;
}
@@ -539,7 +541,7 @@ pub fn Watch(comptime V: type) type {
defer held.release();
const dir = self.os_data.dir_table.get(dirname) orelse return null;
if (dir.file_table.remove(basename)) |file_entry| {
if (dir.file_table.fetchRemove(basename)) |file_entry| {
self.allocator.free(file_entry.key);
return file_entry.value;
}
@@ -552,14 +554,14 @@ pub fn Watch(comptime V: type) type {
const held = self.os_data.table_lock.acquire();
defer held.release();
const entry = self.os_data.file_table.get(realpath) orelse return null;
entry.value.cancelled = true;
const entry = self.os_data.file_table.getEntry(realpath) orelse return null;
entry.value_ptr.cancelled = true;
// @TODO Close the fd here?
await entry.value.putter_frame;
self.allocator.free(entry.key);
self.allocator.destroy(entry.value);
await entry.value_ptr.putter_frame;
self.allocator.free(entry.key_ptr.*);
self.allocator.destroy(entry.value_ptr.*);
self.os_data.file_table.removeAssertDiscard(realpath);
assert(self.os_data.file_table.remove(realpath));
},
else => @compileError("Unsupported OS"),
}
@@ -594,19 +596,19 @@ pub fn Watch(comptime V: type) type {
if (dir.file_table.getEntry(basename)) |file_value| {
self.channel.put(Event{
.id = .CloseWrite,
.data = file_value.value,
.data = file_value.value_ptr.*,
.dirname = dir.dirname,
.basename = file_value.key,
.basename = file_value.key_ptr.*,
});
}
} else if (ev.mask & os.linux.IN_IGNORED == os.linux.IN_IGNORED) {
// Directory watch was removed
const held = self.os_data.table_lock.acquire();
defer held.release();
if (self.os_data.wd_table.remove(ev.wd)) |*wd_entry| {
var file_it = wd_entry.value.file_table.iterator();
if (self.os_data.wd_table.fetchRemove(ev.wd)) |wd_entry| {
var file_it = wd_entry.value.file_table.keyIterator();
while (file_it.next()) |file_entry| {
self.allocator.free(file_entry.key);
self.allocator.free(file_entry.*);
}
self.allocator.free(wd_entry.value.dirname);
wd_entry.value.file_table.deinit(self.allocator);
@@ -620,9 +622,9 @@ pub fn Watch(comptime V: type) type {
if (dir.file_table.getEntry(basename)) |file_value| {
self.channel.put(Event{
.id = .Delete,
.data = file_value.value,
.data = file_value.value_ptr.*,
.dirname = dir.dirname,
.basename = file_value.key,
.basename = file_value.key_ptr.*,
});
}
}
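The fetchRemove calls above rely on the returned KV being a detached copy, so owned key memory can still be freed after the entry is gone. A minimal sketch, with a hypothetical file table:

const std = @import("std");

test "fetchRemove hands back an owned KV copy" {
    const allocator = std.testing.allocator;
    var file_table = std.StringHashMap(u32).init(allocator);
    defer file_table.deinit();

    const key = try allocator.dupe(u8, "main.zig");
    try file_table.put(key, 42);

    // fetchRemove deletes the entry and returns {K, V} by value,
    // so the heap-allocated key is still valid to free here.
    if (file_table.fetchRemove("main.zig")) |kv| {
        std.debug.assert(kv.value == 42);
        allocator.free(kv.key);
    }
}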
+872 -225
View File
@@ -15,7 +15,7 @@ const trait = meta.trait;
const Allocator = mem.Allocator;
const Wyhash = std.hash.Wyhash;
pub fn getAutoHashFn(comptime K: type) (fn (K) u64) {
pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u64) {
comptime {
assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updating
if (K == []const u8) {
@@ -28,7 +28,7 @@ pub fn getAutoHashFn(comptime K: type) (fn (K) u64) {
}
return struct {
fn hash(key: K) u64 {
fn hash(ctx: Context, key: K) u64 {
if (comptime trait.hasUniqueRepresentation(K)) {
return Wyhash.hash(0, std.mem.asBytes(&key));
} else {
@@ -40,31 +40,51 @@ pub fn getAutoHashFn(comptime K: type) (fn (K) u64) {
}.hash;
}
pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
return struct {
fn eql(a: K, b: K) bool {
fn eql(ctx: Context, a: K, b: K) bool {
return meta.eql(a, b);
}
}.eql;
}
pub fn AutoHashMap(comptime K: type, comptime V: type) type {
return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), default_max_load_percentage);
return HashMap(K, V, AutoContext(K), default_max_load_percentage);
}
pub fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type {
return HashMapUnmanaged(K, V, getAutoHashFn(K), getAutoEqlFn(K), default_max_load_percentage);
return HashMapUnmanaged(K, V, AutoContext(K), default_max_load_percentage);
}
pub fn AutoContext(comptime K: type) type {
return struct {
pub const hash = getAutoHashFn(K, @This());
pub const eql = getAutoEqlFn(K, @This());
};
}
/// Builtin hashmap for strings as keys.
/// Key memory is managed by the caller. Keys and values
/// will not automatically be freed.
pub fn StringHashMap(comptime V: type) type {
return HashMap([]const u8, V, hashString, eqlString, default_max_load_percentage);
return HashMap([]const u8, V, StringContext, default_max_load_percentage);
}
/// Key memory is managed by the caller. Keys and values
/// will not automatically be freed.
pub fn StringHashMapUnmanaged(comptime V: type) type {
return HashMapUnmanaged([]const u8, V, hashString, eqlString, default_max_load_percentage);
return HashMapUnmanaged([]const u8, V, StringContext, default_max_load_percentage);
}
pub const StringContext = struct {
pub fn hash(self: @This(), s: []const u8) u64 {
return hashString(s);
}
pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
return eqlString(a, b);
}
};
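Any type with matching hash and eql member functions can serve as a context. As a hedged illustration (CaseInsensitiveContext is a made-up example, not part of std):

const std = @import("std");

const CaseInsensitiveContext = struct {
    pub fn hash(self: @This(), s: []const u8) u64 {
        var h = std.hash.Wyhash.init(0);
        for (s) |c| h.update(&[1]u8{std.ascii.toLower(c)});
        return h.final();
    }
    pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
        return std.ascii.eqlIgnoreCase(a, b);
    }
};

test "custom hash context" {
    var map = std.HashMap([]const u8, u32, CaseInsensitiveContext, std.hash_map.default_max_load_percentage).init(std.testing.allocator);
    defer map.deinit();
    try map.put("Hello", 1);
    std.debug.assert(map.get("HELLO").? == 1);
}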
pub fn eqlString(a: []const u8, b: []const u8) bool {
return mem.eql(u8, a, b);
}
@@ -78,6 +98,222 @@ pub const DefaultMaxLoadPercentage = default_max_load_percentage;
pub const default_max_load_percentage = 80;
/// This function issues a compile error with a helpful message if there
/// is a problem with the provided context type. A context must have the following
/// member functions:
/// - hash(self, PseudoKey) Hash
/// - eql(self, PseudoKey, Key) bool
/// If you are passing a context to an *Adapted function, PseudoKey is the type
/// of the key parameter. Otherwise, when creating a HashMap or HashMapUnmanaged
/// type, PseudoKey = Key = K.
pub fn verifyContext(comptime RawContext: type, comptime PseudoKey: type, comptime Key: type, comptime Hash: type) void {
comptime {
var allow_const_ptr = false;
var allow_mutable_ptr = false;
// Context is the actual namespace type. RawContext may be a pointer to Context.
var Context = RawContext;
// Make sure the context is a namespace type which may have member functions
switch (@typeInfo(Context)) {
.Struct, .Union, .Enum => {},
// Special-case .Opaque for a better error message
.Opaque => @compileError("Hash context must be a type with hash and eql member functions. Cannot use "++@typeName(Context)++" because it is opaque. Use a pointer instead."),
.Pointer => |ptr| {
if (ptr.size != .One) {
@compileError("Hash context must be a type with hash and eql member functions. Cannot use "++@typeName(Context)++" because it is not a single pointer.");
}
Context = ptr.child;
allow_const_ptr = true;
allow_mutable_ptr = !ptr.is_const;
switch (@typeInfo(Context)) {
.Struct, .Union, .Enum, .Opaque => {},
else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use "++@typeName(Context)),
}
},
else => @compileError("Hash context must be a type with hash and eql member functions. Cannot use "++@typeName(Context)),
}
// Keep track of multiple errors so we can report them all.
var errors: []const u8 = "";
// Put common errors here, they will only be evaluated
// if the error is actually triggered.
const lazy = struct {
const prefix = "\n ";
const deep_prefix = prefix ++ " ";
const hash_signature = "fn (self, "++@typeName(PseudoKey)++") "++@typeName(Hash);
const eql_signature = "fn (self, "++@typeName(PseudoKey)++", "++@typeName(Key)++") bool";
const err_invalid_hash_signature = prefix ++ @typeName(Context) ++ ".hash must be " ++ hash_signature ++
deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.hash));
const err_invalid_eql_signature = prefix ++ @typeName(Context) ++ ".eql must be " ++ eql_signature ++
deep_prefix ++ "but is actually " ++ @typeName(@TypeOf(Context.eql));
};
// Verify Context.hash(self, PseudoKey) => Hash
if (@hasDecl(Context, "hash")) {
const hash = Context.hash;
const info = @typeInfo(@TypeOf(hash));
if (info == .Fn) {
const func = info.Fn;
if (func.args.len != 2) {
errors = errors ++ lazy.err_invalid_hash_signature;
} else {
var emitted_signature = false;
if (func.args[0].arg_type) |Self| {
if (Self == Context) {
// pass, this is always fine.
} else if (Self == *const Context) {
if (!allow_const_ptr) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_hash_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self);
errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
}
} else if (Self == *Context) {
if (!allow_mutable_ptr) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_hash_signature;
emitted_signature = true;
}
if (!allow_const_ptr) {
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self);
errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
} else {
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++" or "++@typeName(*const Context)++", but is "++@typeName(Self);
errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer.";
}
}
} else {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_hash_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context);
if (allow_const_ptr) {
errors = errors++" or "++@typeName(*const Context);
if (allow_mutable_ptr) {
errors = errors++" or "++@typeName(*Context);
}
}
errors = errors++", but is "++@typeName(Self);
}
}
if (func.args[1].arg_type != null and func.args[1].arg_type.? != PseudoKey) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_hash_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "Second parameter must be "++@typeName(PseudoKey)++", but is "++@typeName(func.args[1].arg_type.?);
}
if (func.return_type != null and func.return_type.? != Hash) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_hash_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "Return type must be "++@typeName(Hash)++", but was "++@typeName(func.return_type.?);
}
// If any of these are generic (null), we cannot verify them.
// The call sites check the return type, but cannot check the
// parameters. This may cause compile errors with generic hash/eql functions.
}
} else {
errors = errors ++ lazy.err_invalid_hash_signature;
}
} else {
errors = errors ++ lazy.prefix ++ @typeName(Context) ++ " must declare a hash function with signature " ++ lazy.hash_signature;
}
// Verify Context.eql(self, PseudoKey, Key) => bool
if (@hasDecl(Context, "eql")) {
const eql = Context.eql;
const info = @typeInfo(@TypeOf(eql));
if (info == .Fn) {
const func = info.Fn;
if (func.args.len != 3) {
errors = errors ++ lazy.err_invalid_eql_signature;
} else {
var emitted_signature = false;
if (func.args[0].arg_type) |Self| {
if (Self == Context) {
// pass, this is always fine.
} else if (Self == *const Context) {
if (!allow_const_ptr) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self);
errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
}
} else if (Self == *Context) {
if (!allow_mutable_ptr) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
if (!allow_const_ptr) {
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++", but is "++@typeName(Self);
errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be a pointer because it is passed by value.";
} else {
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context)++" or "++@typeName(*const Context)++", but is "++@typeName(Self);
errors = errors ++ lazy.deep_prefix ++ "Note: Cannot be non-const because it is passed by const pointer.";
}
}
} else {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "First parameter must be "++@typeName(Context);
if (allow_const_ptr) {
errors = errors++" or "++@typeName(*const Context);
if (allow_mutable_ptr) {
errors = errors++" or "++@typeName(*Context);
}
}
errors = errors++", but is "++@typeName(Self);
}
}
if (func.args[1].arg_type.? != PseudoKey) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "Second parameter must be "++@typeName(PseudoKey)++", but is "++@typeName(func.args[1].arg_type.?);
}
if (func.args[2].arg_type.? != Key) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "Third parameter must be "++@typeName(Key)++", but is "++@typeName(func.args[2].arg_type.?);
}
if (func.return_type.? != bool) {
if (!emitted_signature) {
errors = errors ++ lazy.err_invalid_eql_signature;
emitted_signature = true;
}
errors = errors ++ lazy.deep_prefix ++ "Return type must be bool, but was "++@typeName(func.return_type.?);
}
// If any of these are generic (null), we cannot verify them.
// The call sites check the return type, but cannot check the
// parameters. This may cause compile errors with generic hash/eql functions.
}
} else {
errors = errors ++ lazy.err_invalid_eql_signature;
}
} else {
errors = errors ++ lazy.prefix ++ @typeName(Context) ++ " must declare an eql function with signature " ++ lazy.eql_signature;
}
if (errors.len != 0) {
// errors begins with a newline (from lazy.prefix)
@compileError("Problems found with hash context type "++@typeName(Context)++":"++errors);
}
}
}
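To make the PseudoKey mechanics concrete: a minimal sketch of an adapted lookup, where the map stores u32 indices but is queried directly with a slice. IndexContext and SliceAdapter are invented names for this illustration:

const std = @import("std");

const IndexContext = struct {
    strings: []const []const u8,
    pub fn hash(self: IndexContext, index: u32) u64 {
        return std.hash.Wyhash.hash(0, self.strings[index]);
    }
    pub fn eql(self: IndexContext, a: u32, b: u32) bool {
        return a == b;
    }
};

const SliceAdapter = struct {
    strings: []const []const u8,
    // PseudoKey is []const u8 here; Key is still u32.
    pub fn hash(self: SliceAdapter, s: []const u8) u64 {
        return std.hash.Wyhash.hash(0, s);
    }
    pub fn eql(self: SliceAdapter, s: []const u8, index: u32) bool {
        return std.mem.eql(u8, s, self.strings[index]);
    }
};

test "adapted lookup by pseudo key" {
    const strings = [_][]const u8{ "alpha", "beta" };
    var map = std.HashMapUnmanaged(u32, void, IndexContext, std.hash_map.default_max_load_percentage){};
    defer map.deinit(std.testing.allocator);

    const ctx = IndexContext{ .strings = &strings };
    try map.putContext(std.testing.allocator, 0, {}, ctx);
    try map.putContext(std.testing.allocator, 1, {}, ctx);

    const adapter = SliceAdapter{ .strings = &strings };
    std.debug.assert(map.containsAdapted(@as([]const u8, "beta"), adapter));
}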
/// General purpose hash table.
/// No order is guaranteed and any modification invalidates live iterators.
/// It provides fast operations (lookup, insertion, deletion) with quite high
@@ -86,83 +322,167 @@ pub const default_max_load_percentage = 80;
/// field, see `HashMapUnmanaged`.
/// If iterating over the table entries is a strong usecase and needs to be fast,
/// prefer the alternative `std.ArrayHashMap`.
/// Context must be a struct type with two member functions:
/// hash(self, K) u64
/// eql(self, K, K) bool
/// Adapted variants of many functions are provided. These variants
/// take a pseudo key instead of a key. Their context must have the functions:
/// hash(self, PseudoKey) u64
/// eql(self, PseudoKey, K) bool
pub fn HashMap(
comptime K: type,
comptime V: type,
comptime hashFn: fn (key: K) u64,
comptime eqlFn: fn (a: K, b: K) bool,
comptime Context: type,
comptime max_load_percentage: u64,
) type {
comptime verifyContext(Context, K, K, u64);
return struct {
unmanaged: Unmanaged,
allocator: *Allocator,
ctx: Context,
pub const Unmanaged = HashMapUnmanaged(K, V, hashFn, eqlFn, max_load_percentage);
/// The type of the unmanaged hash map underlying this wrapper
pub const Unmanaged = HashMapUnmanaged(K, V, Context, max_load_percentage);
/// An entry, containing pointers to a key and value stored in the map
pub const Entry = Unmanaged.Entry;
/// A copy of a key and value which are no longer in the map
pub const KV = Unmanaged.KV;
/// The integer type that is the result of hashing
pub const Hash = Unmanaged.Hash;
/// The iterator type returned by iterator()
pub const Iterator = Unmanaged.Iterator;
pub const KeyIterator = Unmanaged.KeyIterator;
pub const ValueIterator = Unmanaged.ValueIterator;
/// The integer type used to store the size of the map
pub const Size = Unmanaged.Size;
/// The type returned from getOrPut and variants
pub const GetOrPutResult = Unmanaged.GetOrPutResult;
const Self = @This();
/// Create a managed hash map with an empty context.
/// If the context is not zero-sized, you must use
/// initContext(allocator, ctx) instead.
pub fn init(allocator: *Allocator) Self {
if (@sizeOf(Context) != 0) {
@compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
}
return .{
.unmanaged = .{},
.allocator = allocator,
.ctx = undefined, // ctx is zero-sized so this is safe.
};
}
/// Create a managed hash map with a context
pub fn initContext(allocator: *Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.allocator = allocator,
.ctx = ctx,
};
}
/// Release the backing array and invalidate this map.
/// This does *not* deinit keys, values, or the context!
/// If your keys or values need to be released, ensure
/// that that is done before calling this function.
pub fn deinit(self: *Self) void {
self.unmanaged.deinit(self.allocator);
self.* = undefined;
}
/// Empty the map, but keep the backing allocation for future use.
/// This does *not* free keys or values! Be sure to
/// release them if they need deinitialization before
/// calling this function.
pub fn clearRetainingCapacity(self: *Self) void {
return self.unmanaged.clearRetainingCapacity();
}
/// Empty the map and release the backing allocation.
/// This does *not* free keys or values! Be sure to
/// release them if they need deinitialization before
/// calling this function.
pub fn clearAndFree(self: *Self) void {
return self.unmanaged.clearAndFree(self.allocator);
}
/// Return the number of items in the map.
pub fn count(self: Self) Size {
return self.unmanaged.count();
}
/// Create an iterator over the entries in the map.
/// The iterator is invalidated if the map is modified.
pub fn iterator(self: *const Self) Iterator {
return self.unmanaged.iterator();
}
/// Create an iterator over the keys in the map.
/// The iterator is invalidated if the map is modified.
pub fn keyIterator(self: *const Self) KeyIterator {
return self.unmanaged.keyIterator();
}
/// Create an iterator over the values in the map.
/// The iterator is invalidated if the map is modified.
pub fn valueIterator(self: *const Self) ValueIterator {
return self.unmanaged.valueIterator();
}
/// If key exists this function cannot fail.
/// If there is an existing item with `key`, then the result
/// `Entry` pointer points to it, and found_existing is true.
/// `Entry` pointers point to it, and found_existing is true.
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointer points to it. Caller should then initialize
/// the `Entry` pointers point to it. Caller should then initialize
/// the value (but not the key).
pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
return self.unmanaged.getOrPut(self.allocator, key);
return self.unmanaged.getOrPutContext(self.allocator, key, self.ctx);
}
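A minimal usage sketch of the new GetOrPutResult shape (a hypothetical word counter):

const std = @import("std");

test "getOrPut with key_ptr/value_ptr result" {
    var counts = std.StringHashMap(u32).init(std.testing.allocator);
    defer counts.deinit();

    const words = [_][]const u8{ "a", "b", "a" };
    for (words) |word| {
        // The result now exposes key_ptr/value_ptr instead of a *Entry.
        const gop = try counts.getOrPut(word);
        if (!gop.found_existing) gop.value_ptr.* = 0;
        gop.value_ptr.* += 1;
    }
    std.debug.assert(counts.get("a").? == 2);
}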
/// If key exists this function cannot fail.
/// If there is an existing item with `key`, then the result
/// `Entry` pointers point to it, and found_existing is true.
/// Otherwise, puts a new item with undefined key and value, and
/// the `Entry` pointers point to it. Caller must then initialize
/// the key and value.
pub fn getOrPutAdapted(self: *Self, key: anytype, ctx: anytype) !GetOrPutResult {
return self.unmanaged.getOrPutContextAdapted(self.allocator, key, ctx, self.ctx);
}
/// If there is an existing item with `key`, then the result
/// `Entry` pointer points to it, and found_existing is true.
/// `Entry` pointers point to it, and found_existing is true.
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointer points to it. Caller should then initialize
/// the `Entry` pointers point to it. Caller should then initialize
/// the value (but not the key).
/// If a new entry needs to be stored, this function asserts there
/// is enough capacity to store it.
pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
return self.unmanaged.getOrPutAssumeCapacity(key);
return self.unmanaged.getOrPutAssumeCapacityContext(key, self.ctx);
}
pub fn getOrPutValue(self: *Self, key: K, value: V) !*Entry {
return self.unmanaged.getOrPutValue(self.allocator, key, value);
/// If there is an existing item with `key`, then the result
/// `Entry` pointers point to it, and found_existing is true.
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointers point to it. Caller must then initialize
/// the key and value.
/// If a new entry needs to be stored, this function asserts there
/// is enough capacity to store it.
pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
}
pub fn getOrPutValue(self: *Self, key: K, value: V) !Entry {
return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx);
}
/// Increases capacity, guaranteeing that insertions up until the
/// `expected_count` will not cause an allocation, and therefore cannot fail.
pub fn ensureCapacity(self: *Self, expected_count: Size) !void {
return self.unmanaged.ensureCapacity(self.allocator, expected_count);
return self.unmanaged.ensureCapacityContext(self.allocator, expected_count, self.ctx);
}
/// Returns the number of total elements which may be present before it is
@@ -174,67 +494,114 @@ pub fn HashMap(
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPut`.
pub fn put(self: *Self, key: K, value: V) !void {
return self.unmanaged.put(self.allocator, key, value);
return self.unmanaged.putContext(self.allocator, key, value, self.ctx);
}
/// Inserts a key-value pair into the hash map, asserting that no previous
/// entry with the same key is already present
pub fn putNoClobber(self: *Self, key: K, value: V) !void {
return self.unmanaged.putNoClobber(self.allocator, key, value);
return self.unmanaged.putNoClobberContext(self.allocator, key, value, self.ctx);
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
return self.unmanaged.putAssumeCapacity(key, value);
return self.unmanaged.putAssumeCapacityContext(key, value, self.ctx);
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Asserts that it does not clobber any existing data.
/// To detect if a put would clobber existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
return self.unmanaged.putAssumeCapacityNoClobber(key, value);
return self.unmanaged.putAssumeCapacityNoClobberContext(key, value, self.ctx);
}
/// Inserts a new key-value pair into the hash map, returning the previous `KV`, if any.
pub fn fetchPut(self: *Self, key: K, value: V) !?Entry {
return self.unmanaged.fetchPut(self.allocator, key, value);
pub fn fetchPut(self: *Self, key: K, value: V) !?KV {
return self.unmanaged.fetchPutContext(self.allocator, key, value, self.ctx);
}
/// Inserts a new key-value pair into the hash map, returning the previous `KV`, if any.
/// If insertion happens, asserts there is enough capacity without allocating.
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry {
return self.unmanaged.fetchPutAssumeCapacity(key, value);
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
return self.unmanaged.fetchPutAssumeCapacityContext(key, value, self.ctx);
}
/// Removes a value from the map and returns the removed kv pair.
pub fn fetchRemove(self: *Self, key: K) ?KV {
return self.unmanaged.fetchRemoveContext(key, self.ctx);
}
pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
return self.unmanaged.fetchRemoveAdapted(key, ctx);
}
/// Finds the value associated with a key in the map
pub fn get(self: Self, key: K) ?V {
return self.unmanaged.get(key);
return self.unmanaged.getContext(key, self.ctx);
}
pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
return self.unmanaged.getAdapted(key, ctx);
}
pub fn getEntry(self: Self, key: K) ?*Entry {
return self.unmanaged.getEntry(key);
pub fn getPtr(self: Self, key: K) ?*V {
return self.unmanaged.getPtrContext(key, self.ctx);
}
pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
return self.unmanaged.getPtrAdapted(key, ctx);
}
/// Finds the key and value associated with a key in the map
pub fn getEntry(self: Self, key: K) ?Entry {
return self.unmanaged.getEntryContext(key, self.ctx);
}
pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
return self.unmanaged.getEntryAdapted(key, ctx);
}
/// Check if the map contains a key
pub fn contains(self: Self, key: K) bool {
return self.unmanaged.contains(key);
return self.unmanaged.containsContext(key, self.ctx);
}
pub fn containsAdapted(self: Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.containsAdapted(key, ctx);
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and this function returns true. Otherwise it returns false.
pub fn remove(self: *Self, key: K) ?Entry {
return self.unmanaged.remove(key);
pub fn remove(self: *Self, key: K) bool {
return self.unmanaged.removeContext(key, self.ctx);
}
/// Asserts there is an `Entry` with matching key, deletes it from the hash map,
/// and discards it.
pub fn removeAssertDiscard(self: *Self, key: K) void {
return self.unmanaged.removeAssertDiscard(key);
pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
return self.unmanaged.removeAdapted(key, ctx);
}
/// Creates a copy of this map, using the same allocator
pub fn clone(self: Self) !Self {
var other = try self.unmanaged.clone(self.allocator);
return other.promote(self.allocator);
var other = try self.unmanaged.cloneContext(self.allocator, self.ctx);
return other.promoteContext(self.allocator, self.ctx);
}
/// Creates a copy of this map, using a specified allocator
pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self {
var other = try self.unmanaged.cloneContext(new_allocator, self.ctx);
return other.promoteContext(new_allocator, self.ctx);
}
/// Creates a copy of this map, using a specified context
pub fn cloneWithContext(self: Self, new_ctx: anytype) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = try self.unmanaged.cloneContext(self.allocator, new_ctx);
return other.promoteContext(self.allocator, new_ctx);
}
/// Creates a copy of this map, using a specified allocator and context
pub fn cloneWithAllocatorAndContext(self: Self, new_allocator: *Allocator, new_ctx: anytype) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = try self.unmanaged.cloneContext(new_allocator, new_ctx);
return other.promoteContext(new_allocator, new_ctx);
}
};
}
@@ -251,11 +618,12 @@ pub fn HashMap(
pub fn HashMapUnmanaged(
comptime K: type,
comptime V: type,
hashFn: fn (key: K) u64,
eqlFn: fn (a: K, b: K) bool,
comptime Context: type,
comptime max_load_percentage: u64,
) type {
comptime assert(max_load_percentage > 0 and max_load_percentage < 100);
if (max_load_percentage <= 0 or max_load_percentage >= 100)
@compileError("max_load_percentage must be between 0 and 100.");
comptime verifyContext(Context, K, K, u64);
return struct {
const Self = @This();
@@ -284,19 +652,25 @@ pub fn HashMapUnmanaged(
const minimal_capacity = 8;
// This hashmap is specially designed for sizes that fit in a u32.
const Size = u32;
pub const Size = u32;
// u64 hashes guarantee us that the fingerprint bits will never be used
// to compute the index of a slot, maximizing the use of entropy.
const Hash = u64;
pub const Hash = u64;
pub const Entry = struct {
key_ptr: *K,
value_ptr: *V,
};
pub const KV = struct {
key: K,
value: V,
};
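The distinction in practice, as a minimal sketch: Entry aliases the map's storage, while KV is a detached copy.

const std = @import("std");

test "Entry points into the map, KV is a copy" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);

    // Entry{ .key_ptr, .value_ptr } writes through to the map.
    const entry = map.getEntry(1).?;
    entry.value_ptr.* += 1;

    // KV{ .key, .value } remains valid after the entry is removed.
    const kv = map.fetchRemove(1).?;
    std.debug.assert(kv.value == 11);
}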
const Header = packed struct {
entries: [*]Entry,
values: [*]V,
keys: [*]K,
capacity: Size,
};
@@ -353,11 +727,11 @@ pub fn HashMapUnmanaged(
assert(@alignOf(Metadata) == 1);
}
const Iterator = struct {
pub const Iterator = struct {
hm: *const Self,
index: Size = 0,
pub fn next(it: *Iterator) ?*Entry {
pub fn next(it: *Iterator) ?Entry {
assert(it.index <= it.hm.capacity());
if (it.hm.size == 0) return null;
@@ -370,9 +744,10 @@ pub fn HashMapUnmanaged(
it.index += 1;
}) {
if (metadata[0].isUsed()) {
const entry = &it.hm.entries()[it.index];
const key = &it.hm.keys()[it.index];
const value = &it.hm.values()[it.index];
it.index += 1;
return entry;
return Entry{ .key_ptr = key, .value_ptr = value };
}
}
@@ -380,17 +755,50 @@ pub fn HashMapUnmanaged(
}
};
pub const KeyIterator = FieldIterator(K);
pub const ValueIterator = FieldIterator(V);
fn FieldIterator(comptime T: type) type {
return struct {
len: usize,
metadata: [*]const Metadata,
items: [*]T,
pub fn next(self: *@This()) ?*T {
while (self.len > 0) {
self.len -= 1;
const used = self.metadata[0].isUsed();
const item = &self.items[0];
self.metadata += 1;
self.items += 1;
if (used) {
return item;
}
}
return null;
}
};
}
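A minimal sketch of the two new single-field iterators, which walk the separate key and value arrays directly:

const std = @import("std");

test "keyIterator and valueIterator" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);
    try map.put(2, 20);

    var key_sum: u32 = 0;
    var keys = map.keyIterator();
    while (keys.next()) |key_ptr| key_sum += key_ptr.*;
    std.debug.assert(key_sum == 3);

    var value_sum: u32 = 0;
    var values = map.valueIterator();
    while (values.next()) |value_ptr| value_sum += value_ptr.*;
    std.debug.assert(value_sum == 30);
}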
pub const GetOrPutResult = struct {
entry: *Entry,
key_ptr: *K,
value_ptr: *V,
found_existing: bool,
};
pub const Managed = HashMap(K, V, hashFn, eqlFn, max_load_percentage);
pub const Managed = HashMap(K, V, Context, max_load_percentage);
pub fn promote(self: Self, allocator: *Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call promoteContext instead.");
return promoteContext(self, allocator, undefined);
}
pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = allocator,
.ctx = ctx,
};
}
@@ -403,26 +811,6 @@ pub fn HashMapUnmanaged(
self.* = undefined;
}
fn deallocate(self: *Self, allocator: *Allocator) void {
if (self.metadata == null) return;
const cap = self.capacity();
const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata);
const alignment = @alignOf(Entry) - 1;
const entries_size = @as(usize, cap) * @sizeOf(Entry) + alignment;
const total_size = meta_size + entries_size;
var slice: []u8 = undefined;
slice.ptr = @intToPtr([*]u8, @ptrToInt(self.header()));
slice.len = total_size;
allocator.free(slice);
self.metadata = null;
self.available = 0;
}
fn capacityForSize(size: Size) Size {
var new_cap = @truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1);
new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable;
@@ -430,8 +818,13 @@ pub fn HashMapUnmanaged(
}
pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call ensureCapacityContext instead.");
return ensureCapacityContext(self, allocator, new_size, undefined);
}
pub fn ensureCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void {
if (new_size > self.size)
try self.growIfNeeded(allocator, new_size - self.size);
try self.growIfNeeded(allocator, new_size - self.size, ctx);
}
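A minimal sketch of the reserve-then-insert pattern this enables (the counts here are arbitrary):

const std = @import("std");

test "ensureCapacity makes later puts infallible" {
    var map = std.AutoHashMapUnmanaged(u32, u32){};
    defer map.deinit(std.testing.allocator);

    // Reserve once up front; the puts below cannot fail or allocate.
    try map.ensureCapacity(std.testing.allocator, 8);
    var i: u32 = 0;
    while (i < 8) : (i += 1) {
        map.putAssumeCapacity(i, i * 2);
    }
    std.debug.assert(map.size == 8);
}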
pub fn clearRetainingCapacity(self: *Self) void {
@@ -456,8 +849,12 @@ pub fn HashMapUnmanaged(
return @ptrCast(*Header, @ptrCast([*]Header, self.metadata.?) - 1);
}
fn entries(self: *const Self) [*]Entry {
return self.header().entries;
fn keys(self: *const Self) [*]K {
return self.header().keys;
}
fn values(self: *const Self) [*]V {
return self.header().values;
}
pub fn capacity(self: *const Self) Size {
@@ -470,28 +867,75 @@ pub fn HashMapUnmanaged(
return .{ .hm = self };
}
pub fn keyIterator(self: *const Self) KeyIterator {
if (self.metadata) |metadata| {
return .{
.len = self.capacity(),
.metadata = metadata,
.items = self.keys(),
};
} else {
return .{
.len = 0,
.metadata = undefined,
.items = undefined,
};
}
}
pub fn valueIterator(self: *const Self) ValueIterator {
if (self.metadata) |metadata| {
return .{
.len = self.capacity(),
.metadata = metadata,
.items = self.values(),
};
} else {
return .{
.len = 0,
.metadata = undefined,
.items = undefined,
};
}
}
/// Insert an entry in the map. Assumes it is not already present.
pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
assert(!self.contains(key));
try self.growIfNeeded(allocator, 1);
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putNoClobberContext instead.");
return self.putNoClobberContext(allocator, key, value, undefined);
}
pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
assert(!self.containsContext(key, ctx));
try self.growIfNeeded(allocator, 1, ctx);
self.putAssumeCapacityNoClobber(key, value);
self.putAssumeCapacityNoClobberContext(key, value, ctx);
}
/// Asserts there is enough capacity to store the new key-value pair.
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPutAssumeCapacity`.
pub fn putAssumeCapacity(self: *Self, key: K, value: V) void {
const gop = self.getOrPutAssumeCapacity(key);
gop.entry.value = value;
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityContext instead.");
return self.putAssumeCapacityContext(key, value, undefined);
}
pub fn putAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) void {
const gop = self.getOrPutAssumeCapacityContext(key, ctx);
gop.value_ptr.* = value;
}
/// Insert an entry in the map. Assumes it is not already present,
/// and that no allocation is needed.
pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
assert(!self.contains(key));
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putAssumeCapacityNoClobberContext instead.");
return self.putAssumeCapacityNoClobberContext(key, value, undefined);
}
pub fn putAssumeCapacityNoClobberContext(self: *Self, key: K, value: V, ctx: Context) void {
assert(!self.containsContext(key, ctx));
const hash = hashFn(key);
const hash = ctx.hash(key);
const mask = self.capacity() - 1;
var idx = @truncate(usize, hash & mask);
@@ -508,40 +952,102 @@ pub fn HashMapUnmanaged(
const fingerprint = Metadata.takeFingerprint(hash);
metadata[0].fill(fingerprint);
self.entries()[idx] = Entry{ .key = key, .value = value };
self.keys()[idx] = key;
self.values()[idx] = value;
self.size += 1;
}
/// Inserts a new key-value pair into the hash map, returning the previous `KV`, if any.
pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?Entry {
const gop = try self.getOrPut(allocator, key);
var result: ?Entry = null;
pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchPutContext instead.");
return self.fetchPutContext(allocator, key, value, undefined);
}
pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
const gop = try self.getOrPutContext(allocator, key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
result = gop.entry.*;
result = KV{
.key = gop.key_ptr.*,
.value = gop.value_ptr.*,
};
}
gop.entry.value = value;
gop.value_ptr.* = value;
return result;
}
/// Inserts a new key-value pair into the hash map, returning the previous `KV`, if any.
/// If insertion happens, asserts there is enough capacity without allocating.
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?Entry {
const gop = self.getOrPutAssumeCapacity(key);
var result: ?Entry = null;
pub fn fetchPutAssumeCapacity(self: *Self, key: K, value: V) ?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchPutAssumeCapacityContext instead.");
return self.fetchPutAssumeCapacityContext(key, value, undefined);
}
pub fn fetchPutAssumeCapacityContext(self: *Self, key: K, value: V, ctx: Context) ?KV {
const gop = self.getOrPutAssumeCapacityContext(key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
result = gop.entry.*;
result = KV{
.key = gop.key_ptr.*,
.value = gop.value_ptr.*,
};
}
gop.entry.value = value;
gop.value_ptr.* = value;
return result;
}
pub fn getEntry(self: Self, key: K) ?*Entry {
/// If there is an entry with a matching key, it is deleted from
/// the hash map and returned from this function as a `KV` copy.
pub fn fetchRemove(self: *Self, key: K) ?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call fetchRemoveContext instead.");
return self.fetchRemoveContext(key, undefined);
}
pub fn fetchRemoveContext(self: *Self, key: K, ctx: Context) ?KV {
return self.fetchRemoveAdapted(key, ctx);
}
pub fn fetchRemoveAdapted(self: *Self, key: anytype, ctx: anytype) ?KV {
if (self.getIndex(key, ctx)) |idx| {
const old_key = &self.keys()[idx];
const old_val = &self.values()[idx];
const result = KV{
.key = old_key.*,
.value = old_val.*,
};
self.metadata.?[idx].remove();
old_key.* = undefined;
old_val.* = undefined;
self.size -= 1;
return result;
}
return null;
}
/// Find the index containing the data for the given key.
/// Whether this function returns null is almost always
/// branched on after this function returns, and this function
/// returns null/not null from separate code paths. We
/// want the optimizer to remove that branch and instead directly
/// fuse the basic blocks after the branch to the basic blocks
/// from this function. To encourage that, this function is
/// marked as inline.
fn getIndex(self: Self, key: anytype, ctx: anytype) callconv(.Inline) ?usize {
comptime verifyContext(@TypeOf(ctx), @TypeOf(key), K, Hash);
if (self.size == 0) {
return null;
}
const hash = hashFn(key);
// If you get a compile error on this line, it means that your generic hash
// function is invalid for these parameters.
const hash = ctx.hash(key);
// verifyContext can't verify the return type of generic hash functions,
// so we need to double-check it here.
if (@TypeOf(hash) != Hash) {
@compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic hash function that returns the wrong type! "++@typeName(Hash)++" was expected, but found "++@typeName(@TypeOf(hash)));
}
const mask = self.capacity() - 1;
const fingerprint = Metadata.takeFingerprint(hash);
var idx = @truncate(usize, hash & mask);
@@ -549,11 +1055,20 @@ pub fn HashMapUnmanaged(
var metadata = self.metadata.? + idx;
while (metadata[0].isUsed() or metadata[0].isTombstone()) {
if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
const entry = &self.entries()[idx];
if (eqlFn(entry.key, key)) {
return entry;
const test_key = &self.keys()[idx];
// If you get a compile error on this line, it means that your generic eql
// function is invalid for these parameters.
const eql = ctx.eql(key, test_key.*);
// verifyContext can't verify the return type of generic eql functions,
// so we need to double-check it here.
if (@TypeOf(eql) != bool) {
@compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic eql function that returns the wrong type! bool was expected, but found "++@typeName(@TypeOf(eql)));
}
if (eql) {
return idx;
}
}
idx = (idx + 1) & mask;
metadata = self.metadata.? + idx;
}
@@ -561,46 +1076,122 @@ pub fn HashMapUnmanaged(
return null;
}
pub fn getEntry(self: Self, key: K) ?Entry {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getEntryContext instead.");
return self.getEntryContext(key, undefined);
}
pub fn getEntryContext(self: Self, key: K, ctx: Context) ?Entry {
return self.getEntryAdapted(key, ctx);
}
pub fn getEntryAdapted(self: Self, key: anytype, ctx: anytype) ?Entry {
if (self.getIndex(key, ctx)) |idx| {
return Entry{
.key_ptr = &self.keys()[idx],
.value_ptr = &self.values()[idx],
};
}
return null;
}
/// Insert an entry if the associated key is not already present, otherwise update preexisting value.
pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
const result = try self.getOrPut(allocator, key);
result.entry.value = value;
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call putContext instead.");
return self.putContext(allocator, key, value, undefined);
}
pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
result.value_ptr.* = value;
}
/// Get an optional pointer to the value associated with key, if present.
pub fn getPtr(self: Self, key: K) ?*V {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getPtrContext instead.");
return self.getPtrContext(key, undefined);
}
pub fn getPtrContext(self: Self, key: K, ctx: Context) ?*V {
return self.getPtrAdapted(key, ctx);
}
pub fn getPtrAdapted(self: Self, key: anytype, ctx: anytype) ?*V {
if (self.getIndex(key, ctx)) |idx| {
return &self.values()[idx];
}
return null;
}
/// Get a copy of the value associated with key, if present.
pub fn get(self: Self, key: K) ?V {
if (self.size == 0) {
return null;
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getContext instead.");
return self.getContext(key, undefined);
}
pub fn getContext(self: Self, key: K, ctx: Context) ?V {
return self.getAdapted(key, ctx);
}
pub fn getAdapted(self: Self, key: anytype, ctx: anytype) ?V {
if (self.getIndex(key, ctx)) |idx| {
return self.values()[idx];
}
const hash = hashFn(key);
const mask = self.capacity() - 1;
const fingerprint = Metadata.takeFingerprint(hash);
var idx = @truncate(usize, hash & mask);
var metadata = self.metadata.? + idx;
while (metadata[0].isUsed() or metadata[0].isTombstone()) {
if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
const entry = &self.entries()[idx];
if (eqlFn(entry.key, key)) {
return entry.value;
}
}
idx = (idx + 1) & mask;
metadata = self.metadata.? + idx;
}
return null;
}
pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
try self.growIfNeeded(allocator, 1);
return self.getOrPutAssumeCapacity(key);
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContext instead.");
return self.getOrPutContext(allocator, key, undefined);
}
pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutContextAdapted instead.");
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
self.growIfNeeded(allocator, 1, ctx) catch |err| {
// If allocation fails, try to do the lookup anyway.
// If we find an existing item, we can return it.
// Otherwise return the error, we could not add another.
const index = self.getIndex(key, key_ctx) orelse return err;
return GetOrPutResult{
.key_ptr = &self.keys()[index],
.value_ptr = &self.values()[index],
.found_existing = true,
};
};
return self.getOrPutAssumeCapacityAdapted(key, key_ctx);
}
pub fn getOrPutAssumeCapacity(self: *Self, key: K) GetOrPutResult {
const hash = hashFn(key);
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutAssumeCapacityContext instead.");
return self.getOrPutAssumeCapacityContext(key, undefined);
}
pub fn getOrPutAssumeCapacityContext(self: *Self, key: K, ctx: Context) GetOrPutResult {
const result = self.getOrPutAssumeCapacityAdapted(key, ctx);
if (!result.found_existing) {
result.key_ptr.* = key;
}
return result;
}
pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
comptime verifyContext(@TypeOf(ctx), @TypeOf(key), K, Hash);
// If you get a compile error on this line, it means that your generic hash
// function is invalid for these parameters.
const hash = ctx.hash(key);
// verifyContext can't verify the return type of generic hash functions,
// so we need to double-check it here.
if (@TypeOf(hash) != Hash) {
@compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic hash function that returns the wrong type! "++@typeName(Hash)++" was expected, but found "++@typeName(@TypeOf(hash)));
}
const mask = self.capacity() - 1;
const fingerprint = Metadata.takeFingerprint(hash);
var idx = @truncate(usize, hash & mask);
@@ -609,9 +1200,21 @@ pub fn HashMapUnmanaged(
var metadata = self.metadata.? + idx;
while (metadata[0].isUsed() or metadata[0].isTombstone()) {
if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
const entry = &self.entries()[idx];
if (eqlFn(entry.key, key)) {
return GetOrPutResult{ .entry = entry, .found_existing = true };
const test_key = &self.keys()[idx];
// If you get a compile error on this line, it means that your generic eql
// function is invalid for these parameters.
const eql = ctx.eql(key, test_key.*);
// verifyContext can't verify the return type of generic eql functions,
// so we need to double-check it here.
if (@TypeOf(eql) != bool) {
@compileError("Context "++@typeName(@TypeOf(ctx))++" has a generic eql function that returns the wrong type! bool was expected, but found "++@typeName(@TypeOf(eql)));
}
if (eql) {
return GetOrPutResult{
.key_ptr = test_key,
.value_ptr = &self.values()[idx],
.found_existing = true,
};
}
} else if (first_tombstone_idx == self.capacity() and metadata[0].isTombstone()) {
first_tombstone_idx = idx;
@@ -631,79 +1234,67 @@ pub fn HashMapUnmanaged(
}
metadata[0].fill(fingerprint);
const entry = &self.entries()[idx];
entry.* = .{ .key = key, .value = undefined };
const new_key = &self.keys()[idx];
const new_value = &self.values()[idx];
new_key.* = key;
new_value.* = undefined;
self.size += 1;
return GetOrPutResult{ .entry = entry, .found_existing = false };
return GetOrPutResult{
.key_ptr = new_key,
.value_ptr = new_value,
.found_existing = false,
};
}
pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !*Entry {
const res = try self.getOrPut(allocator, key);
if (!res.found_existing) res.entry.value = value;
return res.entry;
pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call getOrPutValueContext instead.");
return self.getOrPutValueContext(allocator, key, value, undefined);
}
pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry {
const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!res.found_existing) {
res.key_ptr.* = key;
res.value_ptr.* = value;
}
return Entry{ .key_ptr = res.key_ptr, .value_ptr = res.value_ptr };
}
/// Return true if there is a value associated with key in the map.
pub fn contains(self: *const Self, key: K) bool {
return self.get(key) != null;
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call containsContext instead.");
return self.containsContext(key, undefined);
}
pub fn containsContext(self: *const Self, key: K, ctx: Context) bool {
return self.containsAdapted(key, ctx);
}
pub fn containsAdapted(self: *const Self, key: anytype, ctx: anytype) bool {
return self.getIndex(key, ctx) != null;
}
/// If there is an `Entry` with a matching key, it is deleted from
/// the hash map, and then returned from this function.
pub fn remove(self: *Self, key: K) ?Entry {
if (self.size == 0) return null;
const hash = hashFn(key);
const mask = self.capacity() - 1;
const fingerprint = Metadata.takeFingerprint(hash);
var idx = @truncate(usize, hash & mask);
var metadata = self.metadata.? + idx;
while (metadata[0].isUsed() or metadata[0].isTombstone()) {
if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
const entry = &self.entries()[idx];
if (eqlFn(entry.key, key)) {
const removed_entry = entry.*;
metadata[0].remove();
entry.* = undefined;
self.size -= 1;
return removed_entry;
}
}
idx = (idx + 1) & mask;
metadata = self.metadata.? + idx;
}
return null;
/// the hash map, and this function returns true. Otherwise this
/// function returns false.
pub fn remove(self: *Self, key: K) bool {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call removeContext instead.");
return self.removeContext(key, undefined);
}
/// Asserts there is an `Entry` with matching key, deletes it from the hash map,
/// and discards it.
pub fn removeAssertDiscard(self: *Self, key: K) void {
assert(self.contains(key));
const hash = hashFn(key);
const mask = self.capacity() - 1;
const fingerprint = Metadata.takeFingerprint(hash);
var idx = @truncate(usize, hash & mask);
var metadata = self.metadata.? + idx;
while (metadata[0].isUsed() or metadata[0].isTombstone()) {
if (metadata[0].isUsed() and metadata[0].fingerprint == fingerprint) {
const entry = &self.entries()[idx];
if (eqlFn(entry.key, key)) {
metadata[0].remove();
entry.* = undefined;
self.size -= 1;
return;
}
}
idx = (idx + 1) & mask;
metadata = self.metadata.? + idx;
pub fn removeContext(self: *Self, key: K, ctx: Context) bool {
return self.removeAdapted(key, ctx);
}
pub fn removeAdapted(self: *Self, key: anytype, ctx: anytype) bool {
if (self.getIndex(key, ctx)) |idx| {
self.metadata.?[idx].remove();
self.keys()[idx] = undefined;
self.values()[idx] = undefined;
self.size -= 1;
return true;
}
unreachable;
return false;
}
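A minimal sketch of the new boolean remove, including the assert(remove(...)) idiom that replaces removeAssertDiscard:

const std = @import("std");

test "remove returns whether a key was present" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(7, 49);

    // Old: map.removeAssertDiscard(7); New:
    std.debug.assert(map.remove(7));
    // A second removal of the same key reports false.
    std.debug.assert(!map.remove(7));
}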
fn initMetadatas(self: *Self) void {
@@ -718,14 +1309,19 @@ pub fn HashMapUnmanaged(
return @truncate(Size, max_load - self.available);
}
fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size) !void {
fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void {
if (new_count > self.available) {
try self.grow(allocator, capacityForSize(self.load() + new_count));
try self.grow(allocator, capacityForSize(self.load() + new_count), ctx);
}
}
pub fn clone(self: Self, allocator: *Allocator) !Self {
var other = Self{};
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context "++@typeName(Context)++", call cloneContext instead.");
return self.cloneContext(allocator, @as(Context, undefined));
}
pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){};
if (self.size == 0)
return other;
@@ -736,11 +1332,11 @@ pub fn HashMapUnmanaged(
var i: Size = 0;
var metadata = self.metadata.?;
var entr = self.entries();
var keys_ptr = self.keys();
var values_ptr = self.values();
while (i < self.capacity()) : (i += 1) {
if (metadata[i].isUsed()) {
const entry = &entr[i];
other.putAssumeCapacityNoClobber(entry.key, entry.value);
other.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], new_ctx);
if (other.size == self.size)
break;
}
@@ -749,7 +1345,8 @@ pub fn HashMapUnmanaged(
return other;
}
fn grow(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void {
@setCold(true);
const new_cap = std.math.max(new_capacity, minimal_capacity);
assert(new_cap > self.capacity());
assert(std.math.isPowerOfTwo(new_cap));
@@ -764,11 +1361,11 @@ pub fn HashMapUnmanaged(
const old_capacity = self.capacity();
var i: Size = 0;
var metadata = self.metadata.?;
var entr = self.entries();
var keys_ptr = self.keys();
var values_ptr = self.values();
while (i < old_capacity) : (i += 1) {
if (metadata[i].isUsed()) {
const entry = &entr[i];
map.putAssumeCapacityNoClobber(entry.key, entry.value);
map.putAssumeCapacityNoClobberContext(keys_ptr[i], values_ptr[i], ctx);
if (map.size == self.size)
break;
}
@@ -780,26 +1377,64 @@ pub fn HashMapUnmanaged(
}
fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
const max_align = comptime math.max3(header_align, key_align, val_align);
const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata);
comptime assert(@alignOf(Metadata) == 1);
const alignment = @alignOf(Entry) - 1;
const entries_size = @as(usize, new_capacity) * @sizeOf(Entry) + alignment;
const keys_start = std.mem.alignForward(meta_size, key_align);
const keys_end = keys_start + new_capacity * @sizeOf(K);
const total_size = meta_size + entries_size;
const vals_start = std.mem.alignForward(keys_end, val_align);
const vals_end = vals_start + new_capacity * @sizeOf(V);
const slice = try allocator.alignedAlloc(u8, @alignOf(Header), total_size);
const total_size = std.mem.alignForward(vals_end, max_align);
const slice = try allocator.alignedAlloc(u8, max_align, total_size);
const ptr = @ptrToInt(slice.ptr);
const metadata = ptr + @sizeOf(Header);
var entry_ptr = ptr + meta_size;
entry_ptr = (entry_ptr + alignment) & ~@as(usize, alignment);
assert(entry_ptr + @as(usize, new_capacity) * @sizeOf(Entry) <= ptr + total_size);
const hdr = @intToPtr(*Header, ptr);
hdr.entries = @intToPtr([*]Entry, entry_ptr);
if (@sizeOf([*]V) != 0) {
hdr.values = @intToPtr([*]V, ptr + vals_start);
}
if (@sizeOf([*]K) != 0) {
hdr.keys = @intToPtr([*]K, ptr + keys_start);
}
hdr.capacity = new_capacity;
self.metadata = @intToPtr([*]Metadata, metadata);
}
fn deallocate(self: *Self, allocator: *Allocator) void {
if (self.metadata == null) return;
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
const max_align = comptime math.max3(header_align, key_align, val_align);
const cap = self.capacity();
const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata);
comptime assert(@alignOf(Metadata) == 1);
const keys_start = std.mem.alignForward(meta_size, key_align);
const keys_end = keys_start + cap * @sizeOf(K);
const vals_start = std.mem.alignForward(keys_end, val_align);
const vals_end = vals_start + cap * @sizeOf(V);
const total_size = std.mem.alignForward(vals_end, max_align);
const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size];
allocator.free(slice);
self.metadata = null;
self.available = 0;
}
};
}
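The new allocate/deallocate pair lays the buffer out as header, metadata, keys, values, with each section placed via alignForward. A standalone sketch of that arithmetic (the 24-byte Header size is an assumption for illustration):

const std = @import("std");

fn demoLayout(header_size: usize, cap: usize) [2]usize {
    const meta_size = header_size + cap * 1; // @sizeOf(Metadata) == 1
    const keys_start = std.mem.alignForward(meta_size, @alignOf(u32));
    const keys_end = keys_start + cap * @sizeOf(u32);
    const vals_start = std.mem.alignForward(keys_end, @alignOf(u64));
    return .{ keys_start, vals_start };
}

test "keys and values land at aligned offsets (K = u32, V = u64)" {
    const offs = demoLayout(24, 8);
    try std.testing.expect(offs[0] % @alignOf(u32) == 0);
    try std.testing.expect(offs[1] % @alignOf(u64) == 0);
}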
@@ -822,14 +1457,14 @@ test "std.hash_map basic usage" {
var sum: u32 = 0;
var it = map.iterator();
while (it.next()) |kv| {
sum += kv.key;
sum += kv.key_ptr.*;
}
try expect(sum == total);
try expectEqual(total, sum);
i = 0;
sum = 0;
while (i < count) : (i += 1) {
try expectEqual(map.get(i).?, i);
try expectEqual(i, map.get(i).?);
sum += map.get(i).?;
}
try expectEqual(total, sum);
@@ -903,7 +1538,7 @@ test "std.hash_map grow" {
i = 0;
var it = map.iterator();
while (it.next()) |kv| {
try expectEqual(kv.key, kv.value);
try expectEqual(kv.key_ptr.*, kv.value_ptr.*);
i += 1;
}
try expectEqual(i, growTo);
@@ -931,9 +1566,9 @@ test "std.hash_map clone" {
defer b.deinit();
try expectEqual(b.count(), 3);
try expectEqual(b.get(1), 1);
try expectEqual(b.get(2), 2);
try expectEqual(b.get(3), 3);
try expectEqual(b.get(1).?, 1);
try expectEqual(b.get(2).?, 2);
try expectEqual(b.get(3).?, 3);
}
test "std.hash_map ensureCapacity with existing elements" {
@@ -975,8 +1610,8 @@ test "std.hash_map remove" {
try expectEqual(map.count(), 10);
var it = map.iterator();
while (it.next()) |kv| {
try expectEqual(kv.key, kv.value);
try expect(kv.key % 3 != 0);
try expectEqual(kv.key_ptr.*, kv.value_ptr.*);
try expect(kv.key_ptr.* % 3 != 0);
}
i = 0;
@@ -1146,7 +1781,7 @@ test "std.hash_map putAssumeCapacity" {
i = 0;
var sum = i;
while (i < 20) : (i += 1) {
sum += map.get(i).?;
sum += map.getPtr(i).?.*;
}
try expectEqual(sum, 190);
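The getPtr call above is new in this change set; a self-contained sketch of mutating a value in place through it:

const std = @import("std");

test "getPtr returns a mutable pointer to the value" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(5, 50);
    if (map.getPtr(5)) |value_ptr| value_ptr.* += 1;
    try std.testing.expect(map.get(5).? == 51);
}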
@@ -1201,33 +1836,34 @@ test "std.hash_map basic hash map usage" {
const gop1 = try map.getOrPut(5);
try testing.expect(gop1.found_existing == true);
try testing.expect(gop1.entry.value == 55);
gop1.entry.value = 77;
try testing.expect(map.getEntry(5).?.value == 77);
try testing.expect(gop1.value_ptr.* == 55);
gop1.value_ptr.* = 77;
try testing.expect(map.getEntry(5).?.value_ptr.* == 77);
const gop2 = try map.getOrPut(99);
try testing.expect(gop2.found_existing == false);
gop2.entry.value = 42;
try testing.expect(map.getEntry(99).?.value == 42);
gop2.value_ptr.* = 42;
try testing.expect(map.getEntry(99).?.value_ptr.* == 42);
const gop3 = try map.getOrPutValue(5, 5);
try testing.expect(gop3.value == 77);
try testing.expect(gop3.value_ptr.* == 77);
const gop4 = try map.getOrPutValue(100, 41);
try testing.expect(gop4.value == 41);
try testing.expect(gop4.value_ptr.* == 41);
try testing.expect(map.contains(2));
try testing.expect(map.getEntry(2).?.value == 22);
try testing.expect(map.getEntry(2).?.value_ptr.* == 22);
try testing.expect(map.get(2).? == 22);
const rmv1 = map.remove(2);
const rmv1 = map.fetchRemove(2);
try testing.expect(rmv1.?.key == 2);
try testing.expect(rmv1.?.value == 22);
try testing.expect(map.remove(2) == null);
try testing.expect(map.fetchRemove(2) == null);
try testing.expect(map.remove(2) == false);
try testing.expect(map.getEntry(2) == null);
try testing.expect(map.get(2) == null);
map.removeAssertDiscard(3);
try testing.expect(map.remove(3) == true);
}
test "std.hash_map clone" {
@@ -1247,3 +1883,14 @@ test "std.hash_map clone" {
try testing.expect(copy.get(i).? == i * 10);
}
}
test "compile everything" {
std.testing.refAllDecls(AutoHashMap(i32, i32));
std.testing.refAllDecls(StringHashMap([]const u8));
std.testing.refAllDecls(AutoHashMap(i32, void));
std.testing.refAllDecls(StringHashMap(u0));
std.testing.refAllDecls(AutoHashMapUnmanaged(i32, i32));
std.testing.refAllDecls(StringHashMapUnmanaged([]const u8));
std.testing.refAllDecls(AutoHashMapUnmanaged(i32, void));
std.testing.refAllDecls(StringHashMapUnmanaged(u0));
}
+10 -10
View File
@@ -346,10 +346,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
break;
}
}
var it = self.large_allocations.iterator();
var it = self.large_allocations.valueIterator();
while (it.next()) |large_alloc| {
log.err("memory address 0x{x} leaked: {s}", .{
@ptrToInt(large_alloc.value.bytes.ptr), large_alloc.value.getStackTrace(),
@ptrToInt(large_alloc.bytes.ptr), large_alloc.getStackTrace(),
});
leaks = true;
}
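valueIterator (and its keyIterator counterpart) walks one of the split arrays directly. A sketch, assuming the managed wrapper forwards it the same way as the unmanaged map used here:

const std = @import("std");

test "valueIterator yields value pointers without Entry indirection" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);
    try map.put(2, 20);

    var sum: u32 = 0;
    var it = map.valueIterator();
    while (it.next()) |value_ptr| sum += value_ptr.*;
    try std.testing.expect(sum == 30);
}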
@@ -444,7 +444,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
};
if (config.safety and old_mem.len != entry.value.bytes.len) {
if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
var free_stack_trace = StackTrace{
.instruction_addresses = &addresses,
@@ -452,9 +452,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
};
std.debug.captureStackTrace(ret_addr, &free_stack_trace);
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {s} Free: {s}", .{
entry.value.bytes.len,
entry.value_ptr.bytes.len,
old_mem.len,
entry.value.getStackTrace(),
entry.value_ptr.getStackTrace(),
free_stack_trace,
});
}
@@ -466,7 +466,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
}
self.large_allocations.removeAssertDiscard(@ptrToInt(old_mem.ptr));
assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr)));
return 0;
}
@@ -475,8 +475,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
old_mem.len, old_mem.ptr, new_size,
});
}
entry.value.bytes = old_mem.ptr[0..result_len];
collectStackTrace(ret_addr, &entry.value.stack_addresses);
entry.value_ptr.bytes = old_mem.ptr[0..result_len];
collectStackTrace(ret_addr, &entry.value_ptr.stack_addresses);
return result_len;
}
@@ -645,8 +645,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
gop.entry.value.bytes = slice;
collectStackTrace(ret_addr, &gop.entry.value.stack_addresses);
gop.value_ptr.bytes = slice;
collectStackTrace(ret_addr, &gop.value_ptr.stack_addresses);
if (config.verbose_log) {
log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
+2 -2
View File
@@ -1303,14 +1303,14 @@ pub const Value = union(enum) {
try child_whitespace.outputIndent(out_stream);
}
try stringify(entry.key, options, out_stream);
try stringify(entry.key_ptr.*, options, out_stream);
try out_stream.writeByte(':');
if (child_options.whitespace) |child_whitespace| {
if (child_whitespace.separator) {
try out_stream.writeByte(' ');
}
}
try stringify(entry.value, child_options, out_stream);
try stringify(entry.value_ptr.*, child_options, out_stream);
}
if (field_output) {
if (options.whitespace) |whitespace| {
+47 -4
View File
@@ -380,12 +380,41 @@ test "math.min" {
}
}
/// Finds the min of three numbers
pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
return min(x, min(y, z));
}
test "math.min3" {
try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0);
try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0);
try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0);
try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0);
try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0);
try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0);
}
pub fn max(x: anytype, y: anytype) @TypeOf(x, y) {
return if (x > y) x else y;
}
test "math.max" {
try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2);
try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2);
}
/// Finds the max of three numbers
pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) {
return max(x, max(y, z));
}
test "math.max3" {
try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2);
try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2);
try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2);
try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2);
try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2);
try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2);
}
pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) {
@@ -581,6 +610,17 @@ pub fn Log2Int(comptime T: type) type {
return std.meta.Int(.unsigned, count);
}
pub fn Log2IntCeil(comptime T: type) type {
// Wide enough to hold ceil(log2(x)) for any x of type T,
// i.e. values up to T's full bit count.
comptime var count = 0;
comptime var s = @typeInfo(T).Int.bits;
inline while (s != 0) : (s >>= 1) {
count += 1;
}
return std.meta.Int(.unsigned, count);
}
pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) type {
assert(from <= to);
if (from == 0 and to == 0) {
@@ -1046,15 +1086,18 @@ fn testCeilPowerOfTwo() !void {
}
pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
@compileError("log2_int requires an unsigned integer, found "++@typeName(T));
assert(x != 0);
return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x));
}
pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
pub fn log2_int_ceil(comptime T: type, x: T) Log2IntCeil(T) {
if (@typeInfo(T) != .Int or @typeInfo(T).Int.signedness != .unsigned)
@compileError("log2_int_ceil requires an unsigned integer, found "++@typeName(T));
assert(x != 0);
const log2_val = log2_int(T, x);
if (@as(T, 1) << log2_val == x)
return log2_val;
if (x == 1) return 0;
const log2_val: Log2IntCeil(T) = log2_int(T, x - 1);
return log2_val + 1;
}
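The widened return type matters near the top of the integer range, where the old Log2Int result could not hold the answer. A small check (not part of the diff):

const std = @import("std");
const math = std.math;

test "log2_int_ceil fits results near the top of the range" {
    // ceil(log2(200)) == 8 does not fit in Log2Int(u8) == u3;
    // Log2IntCeil(u8) == u4 does.
    try std.testing.expect(math.log2_int_ceil(u8, 200) == 8);
    try std.testing.expect(math.log2_int_ceil(u8, 128) == 7);
    try std.testing.expect(math.log2_int_ceil(u8, 1) == 0);
}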
+137 -23
View File
@@ -10,6 +10,15 @@ const mem = std.mem;
const Allocator = mem.Allocator;
const testing = std.testing;
/// A MultiArrayList stores a list of a struct type.
/// Instead of storing a single list of items, MultiArrayList
/// stores separate lists for each field of the struct.
/// This allows for memory savings if the struct has padding,
/// and also improves cache usage if only some fields are needed
/// for a computation. The primary API for accessing fields is
/// the `slice()` function, which computes the start pointers
/// for the array of each field. From the slice you can call
/// `.items(.<field_name>)` to obtain a slice of field values.
pub fn MultiArrayList(comptime S: type) type {
return struct {
bytes: [*]align(@alignOf(S)) u8 = undefined,
@@ -20,6 +29,10 @@ pub fn MultiArrayList(comptime S: type) type {
pub const Field = meta.FieldEnum(S);
/// A MultiArrayList.Slice contains cached start pointers for each field in the list.
/// These pointers are not normally stored to reduce the size of the list in memory.
/// If you are accessing multiple fields, call slice() first to compute the pointers,
/// and then get the field arrays from the slice.
pub const Slice = struct {
/// This array is indexed by the field index which can be obtained
/// by using @enumToInt() on the Field enum
@@ -29,11 +42,12 @@ pub fn MultiArrayList(comptime S: type) type {
pub fn items(self: Slice, comptime field: Field) []FieldType(field) {
const F = FieldType(field);
if (self.len == 0) {
if (self.capacity == 0) {
return &[_]F{};
}
const byte_ptr = self.ptrs[@enumToInt(field)];
const casted_ptr = @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr));
const casted_ptr: [*]F = if (@sizeOf([*]F) == 0) undefined
else @ptrCast([*]F, @alignCast(@alignOf(F), byte_ptr));
return casted_ptr[0..self.len];
}
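A usage sketch of the slice()/items() pattern the doc comments above describe (Elem is a made-up element type, not from this commit):

const std = @import("std");

test "slice() computes field pointers once" {
    const Elem = struct { tag: u8, data: u64 };
    var list = std.MultiArrayList(Elem){};
    defer list.deinit(std.testing.allocator);
    try list.append(std.testing.allocator, .{ .tag = 1, .data = 100 });
    try list.append(std.testing.allocator, .{ .tag = 2, .data = 200 });

    const s = list.slice(); // one base-pointer computation for all fields
    var sum: u64 = 0;
    for (s.items(.tag)) |t, i| {
        if (t == 2) sum += s.items(.data)[i];
    }
    try std.testing.expect(sum == 200);
}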
@@ -74,12 +88,12 @@ pub fn MultiArrayList(comptime S: type) type {
data[i] = .{
.size = @sizeOf(field_info.field_type),
.size_index = i,
.alignment = field_info.alignment,
.alignment = if (@sizeOf(field_info.field_type) == 0) 1 else field_info.alignment,
};
}
const Sort = struct {
fn lessThan(trash: *i32, lhs: Data, rhs: Data) bool {
return lhs.alignment >= rhs.alignment;
return lhs.alignment > rhs.alignment;
}
};
var trash: i32 = undefined; // workaround for stage1 compiler bug
@@ -109,6 +123,9 @@ pub fn MultiArrayList(comptime S: type) type {
return result;
}
/// Compute pointers to the start of each field of the array.
/// If you need to access multiple fields, calling this may
/// be more efficient than calling `items()` multiple times.
pub fn slice(self: Self) Slice {
var result: Slice = .{
.ptrs = undefined,
@@ -123,6 +140,9 @@ pub fn MultiArrayList(comptime S: type) type {
return result;
}
/// Get the slice of values for a specified field.
/// If you need multiple fields, consider calling slice()
/// instead.
pub fn items(self: Self, comptime field: Field) []FieldType(field) {
return self.slice().items(field);
}
@@ -159,6 +179,72 @@ pub fn MultiArrayList(comptime S: type) type {
self.set(self.len - 1, elem);
}
/// Extend the list by 1 element, asserting `self.capacity`
/// is sufficient to hold an additional item. Returns the
/// newly reserved index with uninitialized data.
pub fn addOneAssumeCapacity(self: *Self) usize {
assert(self.len < self.capacity);
const index = self.len;
self.len += 1;
return index;
}
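A short sketch pairing addOneAssumeCapacity with set(), using the same made-up Elem type as above:

const std = @import("std");

test "addOneAssumeCapacity reserves an index for set()" {
    const Elem = struct { tag: u8, data: u64 };
    var list = std.MultiArrayList(Elem){};
    defer list.deinit(std.testing.allocator);

    try list.ensureCapacity(std.testing.allocator, list.len + 1);
    const idx = list.addOneAssumeCapacity(); // no allocation happens here
    list.set(idx, .{ .tag = 3, .data = 300 });
    try std.testing.expect(list.items(.data)[idx] == 300);
}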
/// Inserts an item into an ordered list. Shifts all elements
/// after and including the specified index back by one and
/// sets the given index to the specified element. May reallocate
/// and invalidate iterators.
pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) !void {
try self.ensureCapacity(gpa, self.len + 1);
self.insertAssumeCapacity(index, elem);
}
/// Inserts an item into an ordered list which has room for it.
/// Shifts all elements after and including the specified index
/// back by one and sets the given index to the specified element.
/// Will not reallocate the array, does not invalidate iterators.
pub fn insertAssumeCapacity(self: *Self, index: usize, elem: S) void {
assert(self.len < self.capacity);
assert(index <= self.len);
self.len += 1;
const slices = self.slice();
inline for (fields) |field_info, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i: usize = self.len - 1;
while (i > index) : (i -= 1) {
field_slice[i] = field_slice[i - 1];
}
field_slice[index] = @field(elem, field_info.name);
}
}
/// Remove the specified item from the list, swapping the last
/// item in the list into its position. Fast, but does not
/// retain list ordering.
pub fn swapRemove(self: *Self, index: usize) void {
const slices = self.slice();
inline for (fields) |field_info, i| {
const field_slice = slices.items(@intToEnum(Field, i));
field_slice[index] = field_slice[self.len - 1];
field_slice[self.len - 1] = undefined;
}
self.len -= 1;
}
/// Remove the specified item from the list, shifting items
/// after it to preserve order.
pub fn orderedRemove(self: *Self, index: usize) void {
const slices = self.slice();
inline for (fields) |field_info, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i = index;
while (i < self.len - 1) : (i += 1) {
field_slice[i] = field_slice[i + 1];
}
field_slice[i] = undefined;
}
self.len -= 1;
}
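The two removal flavors trade ordering for speed; a sketch (not part of the diff):

const std = @import("std");

test "swapRemove is O(1), orderedRemove preserves order" {
    const Elem = struct { x: u32 };
    var list = std.MultiArrayList(Elem){};
    defer list.deinit(std.testing.allocator);
    try list.append(std.testing.allocator, .{ .x = 1 });
    try list.append(std.testing.allocator, .{ .x = 2 });
    try list.append(std.testing.allocator, .{ .x = 3 });

    list.swapRemove(0); // last element moves into slot 0: now 3, 2
    try std.testing.expect(list.items(.x)[0] == 3);

    list.orderedRemove(0); // tail shifts down: now 2
    try std.testing.expect(list.items(.x)[0] == 2);
}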
/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void {
@@ -186,13 +272,15 @@ pub fn MultiArrayList(comptime S: type) type {
) catch {
const self_slice = self.slice();
inline for (fields) |field_info, i| {
const field = @intToEnum(Field, i);
const dest_slice = self_slice.items(field)[new_len..];
const byte_count = dest_slice.len * @sizeOf(field_info.field_type);
// We use memset here for more efficient codegen in safety-checked,
// valgrind-enabled builds. Otherwise the valgrind client request
// will be repeated for every element.
@memset(@ptrCast([*]u8, dest_slice.ptr), undefined, byte_count);
if (@sizeOf(field_info.field_type) != 0) {
const field = @intToEnum(Field, i);
const dest_slice = self_slice.items(field)[new_len..];
const byte_count = dest_slice.len * @sizeOf(field_info.field_type);
// We use memset here for more efficient codegen in safety-checked,
// valgrind-enabled builds. Otherwise the valgrind client request
// will be repeated for every element.
@memset(@ptrCast([*]u8, dest_slice.ptr), undefined, byte_count);
}
}
self.len = new_len;
return;
@@ -206,12 +294,14 @@ pub fn MultiArrayList(comptime S: type) type {
const self_slice = self.slice();
const other_slice = other.slice();
inline for (fields) |field_info, i| {
const field = @intToEnum(Field, i);
// TODO we should be able to use std.mem.copy here but it causes a
// test failure on aarch64 with -OReleaseFast
const src_slice = mem.sliceAsBytes(self_slice.items(field));
const dst_slice = mem.sliceAsBytes(other_slice.items(field));
@memcpy(dst_slice.ptr, src_slice.ptr, src_slice.len);
if (@sizeOf(field_info.field_type) != 0) {
const field = @intToEnum(Field, i);
// TODO we should be able to use std.mem.copy here but it causes a
// test failure on aarch64 with -OReleaseFast
const src_slice = mem.sliceAsBytes(self_slice.items(field));
const dst_slice = mem.sliceAsBytes(other_slice.items(field));
@memcpy(dst_slice.ptr, src_slice.ptr, src_slice.len);
}
}
gpa.free(self.allocatedBytes());
self.* = other;
@@ -273,17 +363,41 @@ pub fn MultiArrayList(comptime S: type) type {
const self_slice = self.slice();
const other_slice = other.slice();
inline for (fields) |field_info, i| {
const field = @intToEnum(Field, i);
// TODO we should be able to use std.mem.copy here but it causes a
// test failure on aarch64 with -OReleaseFast
const src_slice = mem.sliceAsBytes(self_slice.items(field));
const dst_slice = mem.sliceAsBytes(other_slice.items(field));
@memcpy(dst_slice.ptr, src_slice.ptr, src_slice.len);
if (@sizeOf(field_info.field_type) != 0) {
const field = @intToEnum(Field, i);
// TODO we should be able to use std.mem.copy here but it causes a
// test failure on aarch64 with -OReleaseFast
const src_slice = mem.sliceAsBytes(self_slice.items(field));
const dst_slice = mem.sliceAsBytes(other_slice.items(field));
@memcpy(dst_slice.ptr, src_slice.ptr, src_slice.len);
}
}
gpa.free(self.allocatedBytes());
self.* = other;
}
/// Create a copy of this list with a new backing store,
/// using the specified allocator.
pub fn clone(self: Self, gpa: *Allocator) !Self {
var result = Self{};
errdefer result.deinit(gpa);
try result.ensureCapacity(gpa, self.len);
result.len = self.len;
const self_slice = self.slice();
const result_slice = result.slice();
inline for (fields) |field_info, i| {
if (@sizeOf(field_info.field_type) != 0) {
const field = @intToEnum(Field, i);
// TODO we should be able to use std.mem.copy here but it causes a
// test failure on aarch64 with -OReleaseFast
const src_slice = mem.sliceAsBytes(self_slice.items(field));
const dst_slice = mem.sliceAsBytes(result_slice.items(field));
@memcpy(dst_slice.ptr, src_slice.ptr, src_slice.len);
}
}
return result;
}
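Usage sketch for the new clone():

const std = @import("std");

test "clone copies into a fresh backing store" {
    const Elem = struct { x: u32 };
    var list = std.MultiArrayList(Elem){};
    defer list.deinit(std.testing.allocator);
    try list.append(std.testing.allocator, .{ .x = 7 });

    var copy = try list.clone(std.testing.allocator);
    defer copy.deinit(std.testing.allocator);
    try std.testing.expect(copy.items(.x)[0] == 7);
}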
fn capacityInBytes(capacity: usize) usize {
const sizes_vector: std.meta.Vector(sizes.bytes.len, usize) = sizes.bytes;
const capacity_vector = @splat(sizes.bytes.len, capacity);
+4 -4
View File
@@ -85,7 +85,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
i += 1; // skip over null byte
try result.setMove(key, value);
try result.putMove(key, value);
}
return result;
} else if (builtin.os.tag == .wasi) {
@@ -112,7 +112,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
var parts = mem.split(pair, "=");
const key = parts.next().?;
const value = parts.next().?;
try result.set(key, value);
try result.put(key, value);
}
return result;
} else if (builtin.link_libc) {
@@ -126,7 +126,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
while (line[end_i] != 0) : (end_i += 1) {}
const value = line[line_i + 1 .. end_i];
try result.set(key, value);
try result.put(key, value);
}
return result;
} else {
@@ -139,7 +139,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
while (line[end_i] != 0) : (end_i += 1) {}
const value = line[line_i + 1 .. end_i];
try result.set(key, value);
try result.put(key, value);
}
return result;
}
+9 -11
View File
@@ -144,9 +144,7 @@ pub fn generate(gpa: *Allocator, tree: ast.Tree) InnerError!Zir {
astgen.extra.items[imports_index] = astgen.addExtraAssumeCapacity(Zir.Inst.Imports{
.imports_len = @intCast(u32, astgen.imports.count()),
});
for (astgen.imports.items()) |entry| {
astgen.extra.appendAssumeCapacity(entry.key);
}
astgen.extra.appendSliceAssumeCapacity(astgen.imports.keys());
}
return Zir{
@@ -7932,13 +7930,13 @@ fn identAsString(astgen: *AstGen, ident_token: ast.TokenIndex) !u32 {
const gop = try astgen.string_table.getOrPut(gpa, key);
if (gop.found_existing) {
string_bytes.shrinkRetainingCapacity(str_index);
return gop.entry.value;
return gop.value_ptr.*;
} else {
// We have to dupe the key into the arena, otherwise the memory
// becomes invalidated when string_bytes gets data appended.
// TODO https://github.com/ziglang/zig/issues/8528
gop.entry.key = try astgen.arena.dupe(u8, key);
gop.entry.value = str_index;
gop.key_ptr.* = try astgen.arena.dupe(u8, key);
gop.value_ptr.* = str_index;
try string_bytes.append(gpa, 0);
return str_index;
}
@@ -7957,15 +7955,15 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !IndexSlice {
if (gop.found_existing) {
string_bytes.shrinkRetainingCapacity(str_index);
return IndexSlice{
.index = gop.entry.value,
.index = gop.value_ptr.*,
.len = @intCast(u32, key.len),
};
} else {
// We have to dupe the key into the arena, otherwise the memory
// becomes invalidated when string_bytes gets data appended.
// TODO https://github.com/ziglang/zig/issues/8528
gop.entry.key = try astgen.arena.dupe(u8, key);
gop.entry.value = str_index;
gop.key_ptr.* = try astgen.arena.dupe(u8, key);
gop.value_ptr.* = str_index;
// Still need a null byte because we are using the same table
// to lookup null terminated strings, so if we get a match, it has to
// be null terminated for that to work.
@@ -9122,10 +9120,10 @@ fn declareNewName(
return astgen.failNodeNotes(node, "redeclaration of '{s}'", .{
name,
}, &[_]u32{
try astgen.errNoteNode(gop.entry.value, "other declaration here", .{}),
try astgen.errNoteNode(gop.value_ptr.*, "other declaration here", .{}),
});
}
gop.entry.value = node;
gop.value_ptr.* = node;
break;
},
.top => break,
+4 -4
View File
@@ -90,10 +90,10 @@ pub const HashHelper = struct {
}
pub fn addStringSet(hh: *HashHelper, hm: std.StringArrayHashMapUnmanaged(void)) void {
const entries = hm.items();
hh.add(entries.len);
for (entries) |entry| {
hh.addBytes(entry.key);
const keys = hm.keys();
hh.add(keys.len);
for (keys) |key| {
hh.addBytes(key);
}
}
+114 -96
View File
@@ -729,18 +729,21 @@ fn addPackageTableToCacheHash(
) (error{OutOfMemory} || std.os.GetCwdError)!void {
const allocator = &arena.allocator;
const packages = try allocator.alloc(Package.Table.Entry, pkg_table.count());
const packages = try allocator.alloc(Package.Table.KV, pkg_table.count());
{
// Copy over the hashmap entries to our slice
var table_it = pkg_table.iterator();
var idx: usize = 0;
while (table_it.next()) |entry| : (idx += 1) {
packages[idx] = entry.*;
packages[idx] = .{
.key = entry.key_ptr.*,
.value = entry.value_ptr.*,
};
}
}
// Sort the slice by package name
std.sort.sort(Package.Table.Entry, packages, {}, struct {
fn lessThan(_: void, lhs: Package.Table.Entry, rhs: Package.Table.Entry) bool {
std.sort.sort(Package.Table.KV, packages, {}, struct {
fn lessThan(_: void, lhs: Package.Table.KV, rhs: Package.Table.KV) bool {
return std.mem.lessThan(u8, lhs.key, rhs.key);
}
}.lessThan);
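The collect-then-sort pattern above generalizes to any map: copy entries into by-value pairs first (Entry now holds pointers into the map, which sorting would not move correctly), then sort the copies. A generic sketch with a hand-rolled KV type:

const std = @import("std");

test "copy map entries into by-value KVs before sorting" {
    const KV = struct { key: []const u8, value: u32 };
    var map = std.StringHashMap(u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put("b", 2);
    try map.put("a", 1);

    var kvs: [2]KV = undefined;
    var i: usize = 0;
    var it = map.iterator();
    while (it.next()) |entry| : (i += 1) {
        kvs[i] = .{ .key = entry.key_ptr.*, .value = entry.value_ptr.* };
    }
    std.sort.sort(KV, &kvs, {}, struct {
        fn lessThan(_: void, lhs: KV, rhs: KV) bool {
            return std.mem.lessThan(u8, lhs.key, rhs.key);
        }
    }.lessThan);
    try std.testing.expect(std.mem.eql(u8, kvs[0].key, "a"));
}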
@@ -1525,8 +1528,8 @@ pub fn destroy(self: *Compilation) void {
{
var it = self.crt_files.iterator();
while (it.next()) |entry| {
gpa.free(entry.key);
entry.value.deinit(gpa);
gpa.free(entry.key_ptr.*);
entry.value_ptr.deinit(gpa);
}
self.crt_files.deinit(gpa);
}
@@ -1554,14 +1557,14 @@ pub fn destroy(self: *Compilation) void {
glibc_file.deinit(gpa);
}
for (self.c_object_table.items()) |entry| {
entry.key.destroy(gpa);
for (self.c_object_table.keys()) |key| {
key.destroy(gpa);
}
self.c_object_table.deinit(gpa);
self.c_object_cache_digest_set.deinit(gpa);
for (self.failed_c_objects.items()) |entry| {
entry.value.destroy(gpa);
for (self.failed_c_objects.values()) |value| {
value.destroy(gpa);
}
self.failed_c_objects.deinit(gpa);
@@ -1578,8 +1581,8 @@ pub fn destroy(self: *Compilation) void {
}
pub fn clearMiscFailures(comp: *Compilation) void {
for (comp.misc_failures.items()) |*entry| {
entry.value.deinit(comp.gpa);
for (comp.misc_failures.values()) |*value| {
value.deinit(comp.gpa);
}
comp.misc_failures.deinit(comp.gpa);
comp.misc_failures = .{};
@@ -1599,9 +1602,10 @@ pub fn update(self: *Compilation) !void {
// For compiling C objects, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each C object.
try self.c_object_work_queue.ensureUnusedCapacity(self.c_object_table.items().len);
for (self.c_object_table.items()) |entry| {
self.c_object_work_queue.writeItemAssumeCapacity(entry.key);
try self.c_object_work_queue.ensureUnusedCapacity(self.c_object_table.count());
for (self.c_object_table.keys()) |key| {
assert(@ptrToInt(key) != 0xaaaa_aaaa_aaaa_aaaa); // 0xaa bytes are the undefined-memory fill pattern in safe builds
self.c_object_work_queue.writeItemAssumeCapacity(key);
}
const use_stage1 = build_options.omit_stage2 or
@@ -1620,8 +1624,8 @@ pub fn update(self: *Compilation) !void {
// it changed, and, if so, re-compute ZIR and then queue the job
// to update it.
try self.astgen_work_queue.ensureUnusedCapacity(module.import_table.count());
for (module.import_table.items()) |entry| {
self.astgen_work_queue.writeItemAssumeCapacity(entry.value);
for (module.import_table.values()) |value| {
self.astgen_work_queue.writeItemAssumeCapacity(value);
}
try self.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
@@ -1635,12 +1639,12 @@ pub fn update(self: *Compilation) !void {
// Process the deletion set. We use a while loop here because the
// deletion set may grow as we call `clearDecl` within this loop,
// and more unreferenced Decls are revealed.
while (module.deletion_set.entries.items.len != 0) {
const decl = module.deletion_set.entries.items[0].key;
while (module.deletion_set.count() != 0) {
const decl = module.deletion_set.keys()[0];
assert(decl.deletion_flag);
assert(decl.dependants.count() == 0);
const is_anon = if (decl.zir_decl_index == 0) blk: {
break :blk decl.namespace.anon_decls.swapRemove(decl) != null;
break :blk decl.namespace.anon_decls.swapRemove(decl);
} else false;
try module.clearDecl(decl, null);
@@ -1677,8 +1681,7 @@ pub fn update(self: *Compilation) !void {
// to reference the ZIR.
if (self.totalErrorCount() == 0 and !self.keep_source_files_loaded) {
if (self.bin_file.options.module) |module| {
for (module.import_table.items()) |entry| {
const file = entry.value;
for (module.import_table.values()) |file| {
file.unloadTree(self.gpa);
file.unloadSource(self.gpa);
}
@@ -1702,18 +1705,21 @@ pub fn totalErrorCount(self: *Compilation) usize {
var total: usize = self.failed_c_objects.count() + self.misc_failures.count();
if (self.bin_file.options.module) |module| {
total += module.failed_exports.items().len;
total += module.failed_exports.count();
for (module.failed_files.items()) |entry| {
if (entry.value) |_| {
total += 1;
} else {
const file = entry.key;
assert(file.zir_loaded);
const payload_index = file.zir.extra[@enumToInt(Zir.ExtraIndex.compile_errors)];
assert(payload_index != 0);
const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
total += header.data.items_len;
{
var it = module.failed_files.iterator();
while (it.next()) |entry| {
if (entry.value_ptr.*) |_| {
total += 1;
} else {
const file = entry.key_ptr.*;
assert(file.zir_loaded);
const payload_index = file.zir.extra[@enumToInt(Zir.ExtraIndex.compile_errors)];
assert(payload_index != 0);
const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
total += header.data.items_len;
}
}
}
@@ -1721,14 +1727,14 @@ pub fn totalErrorCount(self: *Compilation) usize {
// When a parse error is introduced, we keep all the semantic analysis for
// the previous parse success, including compile errors, but we cannot
// emit them until the file succeeds parsing.
for (module.failed_decls.items()) |entry| {
if (entry.key.namespace.file_scope.okToReportErrors()) {
for (module.failed_decls.keys()) |key| {
if (key.namespace.file_scope.okToReportErrors()) {
total += 1;
}
}
if (module.emit_h) |emit_h| {
for (emit_h.failed_decls.items()) |entry| {
if (entry.key.namespace.file_scope.okToReportErrors()) {
for (emit_h.failed_decls.keys()) |key| {
if (key.namespace.file_scope.okToReportErrors()) {
total += 1;
}
}
@@ -1743,7 +1749,7 @@ pub fn totalErrorCount(self: *Compilation) usize {
// Compile log errors only count if there are no other errors.
if (total == 0) {
if (self.bin_file.options.module) |module| {
total += @boolToInt(module.compile_log_decls.items().len != 0);
total += @boolToInt(module.compile_log_decls.count() != 0);
}
}
@@ -1757,57 +1763,67 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
defer errors.deinit();
for (self.failed_c_objects.items()) |entry| {
const c_object = entry.key;
const err_msg = entry.value;
// TODO these fields will need to be adjusted when we have proper
// C error reporting bubbling up.
try errors.append(.{
.src = .{
.src_path = try arena.allocator.dupe(u8, c_object.src.src_path),
.msg = try std.fmt.allocPrint(&arena.allocator, "unable to build C object: {s}", .{
err_msg.msg,
}),
.byte_offset = 0,
.line = err_msg.line,
.column = err_msg.column,
.source_line = null, // TODO
},
});
{
var it = self.failed_c_objects.iterator();
while (it.next()) |entry| {
const c_object = entry.key_ptr.*;
const err_msg = entry.value_ptr.*;
// TODO these fields will need to be adjusted when we have proper
// C error reporting bubbling up.
try errors.append(.{
.src = .{
.src_path = try arena.allocator.dupe(u8, c_object.src.src_path),
.msg = try std.fmt.allocPrint(&arena.allocator, "unable to build C object: {s}", .{
err_msg.msg,
}),
.byte_offset = 0,
.line = err_msg.line,
.column = err_msg.column,
.source_line = null, // TODO
},
});
}
}
for (self.misc_failures.items()) |entry| {
try AllErrors.addPlainWithChildren(&arena, &errors, entry.value.msg, entry.value.children);
for (self.misc_failures.values()) |*value| {
try AllErrors.addPlainWithChildren(&arena, &errors, value.msg, value.children);
}
if (self.bin_file.options.module) |module| {
for (module.failed_files.items()) |entry| {
if (entry.value) |msg| {
try AllErrors.add(module, &arena, &errors, msg.*);
} else {
// Must be ZIR errors. In order for ZIR errors to exist, the parsing
// must have completed successfully.
const tree = try entry.key.getTree(module.gpa);
assert(tree.errors.len == 0);
try AllErrors.addZir(&arena.allocator, &errors, entry.key);
}
}
for (module.failed_decls.items()) |entry| {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (entry.key.namespace.file_scope.okToReportErrors()) {
try AllErrors.add(module, &arena, &errors, entry.value.*);
}
}
if (module.emit_h) |emit_h| {
for (emit_h.failed_decls.items()) |entry| {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (entry.key.namespace.file_scope.okToReportErrors()) {
try AllErrors.add(module, &arena, &errors, entry.value.*);
{
var it = module.failed_files.iterator();
while (it.next()) |entry| {
if (entry.value_ptr.*) |msg| {
try AllErrors.add(module, &arena, &errors, msg.*);
} else {
// Must be ZIR errors. In order for ZIR errors to exist, the parsing
// must have completed successfully.
const tree = try entry.key_ptr.*.getTree(module.gpa);
assert(tree.errors.len == 0);
try AllErrors.addZir(&arena.allocator, &errors, entry.key_ptr.*);
}
}
}
for (module.failed_exports.items()) |entry| {
try AllErrors.add(module, &arena, &errors, entry.value.*);
{
var it = module.failed_decls.iterator();
while (it.next()) |entry| {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (entry.key_ptr.*.namespace.file_scope.okToReportErrors()) {
try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*);
}
}
}
if (module.emit_h) |emit_h| {
var it = emit_h.failed_decls.iterator();
while (it.next()) |entry| {
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (entry.key_ptr.*.namespace.file_scope.okToReportErrors()) {
try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*);
}
}
}
for (module.failed_exports.values()) |value| {
try AllErrors.add(module, &arena, &errors, value.*);
}
}
@@ -1820,20 +1836,21 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
}
if (self.bin_file.options.module) |module| {
const compile_log_items = module.compile_log_decls.items();
if (errors.items.len == 0 and compile_log_items.len != 0) {
if (errors.items.len == 0 and module.compile_log_decls.count() != 0) {
const keys = module.compile_log_decls.keys();
const values = module.compile_log_decls.values();
// First one will be the error; subsequent ones will be notes.
const src_loc = compile_log_items[0].key.nodeOffsetSrcLoc(compile_log_items[0].value);
const src_loc = keys[0].nodeOffsetSrcLoc(values[0]);
const err_msg = Module.ErrorMsg{
.src_loc = src_loc,
.msg = "found compile log statement",
.notes = try self.gpa.alloc(Module.ErrorMsg, compile_log_items.len - 1),
.notes = try self.gpa.alloc(Module.ErrorMsg, module.compile_log_decls.count() - 1),
};
defer self.gpa.free(err_msg.notes);
for (compile_log_items[1..]) |entry, i| {
for (keys[1..]) |key, i| {
err_msg.notes[i] = .{
.src_loc = entry.key.nodeOffsetSrcLoc(entry.value),
.src_loc = key.nodeOffsetSrcLoc(values[i + 1]),
.msg = "also here",
};
}
@@ -1898,6 +1915,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
}
while (self.c_object_work_queue.readItem()) |c_object| {
assert(@ptrToInt(c_object) != 0xaaaa_aaaa_aaaa_aaaa); // guard against a pointer read from undefined memory
self.work_queue_wait_group.start();
try self.thread_pool.spawn(workerUpdateCObject, .{
self, c_object, &c_obj_prog_node, &self.work_queue_wait_group,
@@ -1964,7 +1982,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
continue;
},
else => {
try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.items().len + 1);
try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1);
module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
module.gpa,
decl.srcLoc(),
@@ -2036,7 +2054,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
const module = self.bin_file.options.module.?;
self.bin_file.updateDeclLineNumber(module, decl) catch |err| {
try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.items().len + 1);
try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1);
module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
module.gpa,
decl.srcLoc(),
@@ -2101,7 +2119,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
};
},
.windows_import_lib => |index| {
const link_lib = self.bin_file.options.system_libs.items()[index].key;
const link_lib = self.bin_file.options.system_libs.keys()[index];
mingw.buildImportLib(self, link_lib) catch |err| {
// TODO Surface more error details.
try self.setMiscFailure(
@@ -3023,7 +3041,7 @@ fn failCObjWithOwnedErrorMsg(
defer lock.release();
{
errdefer err_msg.destroy(comp.gpa);
try comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.items().len + 1);
try comp.failed_c_objects.ensureCapacity(comp.gpa, comp.failed_c_objects.count() + 1);
}
comp.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
}
@@ -3953,8 +3971,8 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
// We need to save the inferred link libs to the cache, otherwise if we get a cache hit
// next time we will be missing these libs.
var libs_txt = std.ArrayList(u8).init(arena);
for (comp.bin_file.options.system_libs.items()[inferred_lib_start_index..]) |entry| {
try libs_txt.writer().print("{s}\n", .{entry.key});
for (comp.bin_file.options.system_libs.keys()[inferred_lib_start_index..]) |key| {
try libs_txt.writer().print("{s}\n", .{key});
}
try directory.handle.writeFile(libs_txt_basename, libs_txt.items);
}
@@ -4017,7 +4035,7 @@ fn createStage1Pkg(
var children = std.ArrayList(*stage1.Pkg).init(arena);
var it = pkg.table.iterator();
while (it.next()) |entry| {
try children.append(try createStage1Pkg(arena, entry.key, entry.value, child_pkg));
try children.append(try createStage1Pkg(arena, entry.key_ptr.*, entry.value_ptr.*, child_pkg));
}
break :blk children.items;
};
+113 -122
View File
@@ -268,15 +268,7 @@ pub const Decl = struct {
/// typed_value may need to be regenerated.
dependencies: DepsTable = .{},
/// The reason this is not `std.AutoArrayHashMapUnmanaged` is a workaround for
/// stage1 compiler giving me: `error: struct 'Module.Decl' depends on itself`
pub const DepsTable = std.ArrayHashMapUnmanaged(
*Decl,
void,
std.array_hash_map.getAutoHashFn(*Decl),
std.array_hash_map.getAutoEqlFn(*Decl),
false,
);
pub const DepsTable = std.AutoArrayHashMapUnmanaged(*Decl, void);
pub fn clearName(decl: *Decl, gpa: *Allocator) void {
gpa.free(mem.spanZ(decl.name));
@@ -287,7 +279,7 @@ pub const Decl = struct {
const gpa = module.gpa;
log.debug("destroy {*} ({s})", .{ decl, decl.name });
if (decl.deletion_flag) {
module.deletion_set.swapRemoveAssertDiscard(decl);
assert(module.deletion_set.swapRemove(decl));
}
if (decl.has_tv) {
if (decl.getInnerNamespace()) |namespace| {
@@ -550,11 +542,11 @@ pub const Decl = struct {
}
fn removeDependant(decl: *Decl, other: *Decl) void {
decl.dependants.removeAssertDiscard(other);
assert(decl.dependants.swapRemove(other));
}
fn removeDependency(decl: *Decl, other: *Decl) void {
decl.dependencies.removeAssertDiscard(other);
assert(decl.dependencies.swapRemove(other));
}
};
@@ -683,7 +675,7 @@ pub const EnumFull = struct {
/// Offset from `owner_decl`, points to the enum decl AST node.
node_offset: i32,
pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.hash_u32, Value.eql, false);
pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false);
pub fn srcLoc(self: EnumFull) SrcLoc {
return .{
@@ -895,13 +887,13 @@ pub const Scope = struct {
var anon_decls = ns.anon_decls;
ns.anon_decls = .{};
for (decls.items()) |entry| {
entry.value.destroy(mod);
for (decls.values()) |value| {
value.destroy(mod);
}
decls.deinit(gpa);
for (anon_decls.items()) |entry| {
entry.key.destroy(mod);
for (anon_decls.keys()) |key| {
key.destroy(mod);
}
anon_decls.deinit(gpa);
}
@@ -924,15 +916,13 @@ pub const Scope = struct {
// TODO rework this code to not panic on OOM.
// (might want to coordinate with the clearDecl function)
for (decls.items()) |entry| {
const child_decl = entry.value;
for (decls.values()) |child_decl| {
mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory");
child_decl.destroy(mod);
}
decls.deinit(gpa);
for (anon_decls.items()) |entry| {
const child_decl = entry.key;
for (anon_decls.keys()) |child_decl| {
mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory");
child_decl.destroy(mod);
}
@@ -2120,9 +2110,11 @@ pub const InnerError = error{ OutOfMemory, AnalysisFail };
pub fn deinit(mod: *Module) void {
const gpa = mod.gpa;
for (mod.import_table.items()) |entry| {
gpa.free(entry.key);
entry.value.destroy(mod);
for (mod.import_table.keys()) |key| {
gpa.free(key);
}
for (mod.import_table.values()) |value| {
value.destroy(mod);
}
mod.import_table.deinit(gpa);
@@ -2130,16 +2122,16 @@ pub fn deinit(mod: *Module) void {
// The callsite of `Compilation.create` owns the `root_pkg`, however
// Module owns the builtin and std packages that it adds.
if (mod.root_pkg.table.remove("builtin")) |entry| {
gpa.free(entry.key);
entry.value.destroy(gpa);
if (mod.root_pkg.table.fetchRemove("builtin")) |kv| {
gpa.free(kv.key);
kv.value.destroy(gpa);
}
if (mod.root_pkg.table.remove("std")) |entry| {
gpa.free(entry.key);
entry.value.destroy(gpa);
if (mod.root_pkg.table.fetchRemove("std")) |kv| {
gpa.free(kv.key);
kv.value.destroy(gpa);
}
if (mod.root_pkg.table.remove("root")) |entry| {
gpa.free(entry.key);
if (mod.root_pkg.table.fetchRemove("root")) |kv| {
gpa.free(kv.key);
}
mod.compile_log_text.deinit(gpa);
@@ -2148,46 +2140,45 @@ pub fn deinit(mod: *Module) void {
mod.local_zir_cache.handle.close();
mod.global_zir_cache.handle.close();
for (mod.failed_decls.items()) |entry| {
entry.value.destroy(gpa);
for (mod.failed_decls.values()) |value| {
value.destroy(gpa);
}
mod.failed_decls.deinit(gpa);
if (mod.emit_h) |emit_h| {
for (emit_h.failed_decls.items()) |entry| {
entry.value.destroy(gpa);
for (emit_h.failed_decls.values()) |value| {
value.destroy(gpa);
}
emit_h.failed_decls.deinit(gpa);
emit_h.decl_table.deinit(gpa);
gpa.destroy(emit_h);
}
for (mod.failed_files.items()) |entry| {
if (entry.value) |msg| msg.destroy(gpa);
for (mod.failed_files.values()) |value| {
if (value) |msg| msg.destroy(gpa);
}
mod.failed_files.deinit(gpa);
for (mod.failed_exports.items()) |entry| {
entry.value.destroy(gpa);
for (mod.failed_exports.values()) |value| {
value.destroy(gpa);
}
mod.failed_exports.deinit(gpa);
mod.compile_log_decls.deinit(gpa);
for (mod.decl_exports.items()) |entry| {
const export_list = entry.value;
for (mod.decl_exports.values()) |export_list| {
gpa.free(export_list);
}
mod.decl_exports.deinit(gpa);
for (mod.export_owners.items()) |entry| {
freeExportList(gpa, entry.value);
for (mod.export_owners.values()) |value| {
freeExportList(gpa, value);
}
mod.export_owners.deinit(gpa);
var it = mod.global_error_set.iterator();
while (it.next()) |entry| {
gpa.free(entry.key);
var it = mod.global_error_set.keyIterator();
while (it.next()) |key| {
gpa.free(key.*);
}
mod.global_error_set.deinit(gpa);
@@ -2670,12 +2661,10 @@ fn updateZirRefs(gpa: *Allocator, file: *Scope.File, old_zir: Zir) !void {
}
if (decl.getInnerNamespace()) |namespace| {
for (namespace.decls.items()) |entry| {
const sub_decl = entry.value;
for (namespace.decls.values()) |sub_decl| {
try decl_stack.append(gpa, sub_decl);
}
for (namespace.anon_decls.items()) |entry| {
const sub_decl = entry.key;
for (namespace.anon_decls.keys()) |sub_decl| {
try decl_stack.append(gpa, sub_decl);
}
}
@@ -2769,8 +2758,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void {
// prior to re-analysis.
mod.deleteDeclExports(decl);
// Dependencies will be re-discovered, so we remove them here prior to re-analysis.
for (decl.dependencies.items()) |entry| {
const dep = entry.key;
for (decl.dependencies.keys()) |dep| {
dep.removeDependant(decl);
if (dep.dependants.count() == 0 and !dep.deletion_flag) {
log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{
@@ -2817,8 +2805,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void {
// We may need to chase the dependants and re-analyze them.
// However, if the decl is a function, and the type is the same, we do not need to.
if (type_changed or decl.ty.zigTypeTag() != .Fn) {
for (decl.dependants.items()) |entry| {
const dep = entry.key;
for (decl.dependants.keys()) |dep| {
switch (dep.analysis) {
.unreferenced => unreachable,
.in_progress => continue, // already doing analysis, ok
@@ -3128,7 +3115,7 @@ pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !vo
if (dependee.deletion_flag) {
dependee.deletion_flag = false;
mod.deletion_set.removeAssertDiscard(dependee);
assert(mod.deletion_set.swapRemove(dependee));
}
dependee.dependants.putAssumeCapacity(depender, {});
@@ -3154,7 +3141,7 @@ pub fn importPkg(mod: *Module, cur_pkg: *Package, pkg: *Package) !ImportFileResu
const gop = try mod.import_table.getOrPut(gpa, resolved_path);
if (gop.found_existing) return ImportFileResult{
.file = gop.entry.value,
.file = gop.value_ptr.*,
.is_new = false,
};
keep_resolved_path = true; // It's now owned by import_table.
@@ -3165,7 +3152,7 @@ pub fn importPkg(mod: *Module, cur_pkg: *Package, pkg: *Package) !ImportFileResu
const new_file = try gpa.create(Scope.File);
errdefer gpa.destroy(new_file);
gop.entry.value = new_file;
gop.value_ptr.* = new_file;
new_file.* = .{
.sub_file_path = sub_file_path,
.source = undefined,
@@ -3209,7 +3196,7 @@ pub fn importFile(
const gop = try mod.import_table.getOrPut(gpa, resolved_path);
if (gop.found_existing) return ImportFileResult{
.file = gop.entry.value,
.file = gop.value_ptr.*,
.is_new = false,
};
keep_resolved_path = true; // It's now owned by import_table.
@@ -3231,7 +3218,7 @@ pub fn importFile(
resolved_root_path, resolved_path, sub_file_path, import_string,
});
gop.entry.value = new_file;
gop.value_ptr.* = new_file;
new_file.* = .{
.sub_file_path = sub_file_path,
.source = undefined,
@@ -3366,7 +3353,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!vo
log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace });
new_decl.src_line = line;
new_decl.name = decl_name;
gop.entry.value = new_decl;
gop.value_ptr.* = new_decl;
// Exported decls, comptime decls, usingnamespace decls, and
// test decls if in test mode, get analyzed.
const want_analysis = is_exported or switch (decl_name_index) {
@@ -3385,7 +3372,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!vo
return;
}
gpa.free(decl_name);
const decl = gop.entry.value;
const decl = gop.value_ptr.*;
log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace });
// Update the AST node of the decl; even if its contents are unchanged, it may
// have been re-ordered.
@@ -3438,10 +3425,9 @@ pub fn clearDecl(
}
// Remove itself from its dependencies.
for (decl.dependencies.items()) |entry| {
const dep = entry.key;
for (decl.dependencies.keys()) |dep| {
dep.removeDependant(decl);
if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
if (dep.dependants.count() == 0 and !dep.deletion_flag) {
// We don't recursively perform a deletion here, because during the update,
// another reference to it may turn up.
dep.deletion_flag = true;
@@ -3451,8 +3437,7 @@ pub fn clearDecl(
decl.dependencies.clearRetainingCapacity();
// Anything that depends on this deleted decl needs to be re-analyzed.
for (decl.dependants.items()) |entry| {
const dep = entry.key;
for (decl.dependants.keys()) |dep| {
dep.removeDependency(decl);
if (outdated_decls) |map| {
map.putAssumeCapacity(dep, {});
@@ -3467,14 +3452,14 @@ pub fn clearDecl(
}
decl.dependants.clearRetainingCapacity();
if (mod.failed_decls.swapRemove(decl)) |entry| {
entry.value.destroy(gpa);
if (mod.failed_decls.fetchSwapRemove(decl)) |kv| {
kv.value.destroy(gpa);
}
if (mod.emit_h) |emit_h| {
if (emit_h.failed_decls.swapRemove(decl)) |entry| {
entry.value.destroy(gpa);
if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| {
kv.value.destroy(gpa);
}
emit_h.decl_table.removeAssertDiscard(decl);
assert(emit_h.decl_table.swapRemove(decl));
}
_ = mod.compile_log_decls.swapRemove(decl);
mod.deleteDeclExports(decl);
@@ -3510,7 +3495,7 @@ pub fn clearDecl(
if (decl.deletion_flag) {
decl.deletion_flag = false;
mod.deletion_set.swapRemoveAssertDiscard(decl);
assert(mod.deletion_set.swapRemove(decl));
}
decl.analysis = .unreferenced;
@@ -3519,12 +3504,12 @@ pub fn clearDecl(
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(mod: *Module, decl: *Decl) void {
const kv = mod.export_owners.swapRemove(decl) orelse return;
const kv = mod.export_owners.fetchSwapRemove(decl) orelse return;
for (kv.value) |exp| {
if (mod.decl_exports.getEntry(exp.exported_decl)) |decl_exports_kv| {
if (mod.decl_exports.getPtr(exp.exported_decl)) |value_ptr| {
// Remove exports with owner_decl matching the regenerating decl.
const list = decl_exports_kv.value;
const list = value_ptr.*;
var i: usize = 0;
var new_len = list.len;
while (i < new_len) {
@@ -3535,9 +3520,9 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
i += 1;
}
}
decl_exports_kv.value = mod.gpa.shrink(list, new_len);
value_ptr.* = mod.gpa.shrink(list, new_len);
if (new_len == 0) {
mod.decl_exports.removeAssertDiscard(exp.exported_decl);
assert(mod.decl_exports.swapRemove(exp.exported_decl));
}
}
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
@@ -3546,8 +3531,8 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
macho.deleteExport(exp.link.macho);
}
if (mod.failed_exports.swapRemove(exp)) |entry| {
entry.value.destroy(mod.gpa);
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
}
mod.gpa.free(exp.options.name);
mod.gpa.destroy(exp);
@@ -3623,12 +3608,12 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
log.debug("mark outdated {*} ({s})", .{ decl, decl.name });
try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl });
if (mod.failed_decls.swapRemove(decl)) |entry| {
entry.value.destroy(mod.gpa);
if (mod.failed_decls.fetchSwapRemove(decl)) |kv| {
kv.value.destroy(mod.gpa);
}
if (mod.emit_h) |emit_h| {
if (emit_h.failed_decls.swapRemove(decl)) |entry| {
entry.value.destroy(mod.gpa);
if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| {
kv.value.destroy(mod.gpa);
}
}
_ = mod.compile_log_decls.swapRemove(decl);
@@ -3686,17 +3671,24 @@ fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: ast.Node
}
/// Get error value for error tag `name`.
pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).Entry {
pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).KV {
const gop = try mod.global_error_set.getOrPut(mod.gpa, name);
if (gop.found_existing)
return gop.entry.*;
if (gop.found_existing) {
return std.StringHashMapUnmanaged(ErrorInt).KV{
.key = gop.key_ptr.*,
.value = gop.value_ptr.*,
};
}
errdefer mod.global_error_set.removeAssertDiscard(name);
errdefer assert(mod.global_error_set.remove(name));
try mod.error_name_list.ensureCapacity(mod.gpa, mod.error_name_list.items.len + 1);
gop.entry.key = try mod.gpa.dupe(u8, name);
gop.entry.value = @intCast(ErrorInt, mod.error_name_list.items.len);
mod.error_name_list.appendAssumeCapacity(gop.entry.key);
return gop.entry.*;
gop.key_ptr.* = try mod.gpa.dupe(u8, name);
gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len);
mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*);
return std.StringHashMapUnmanaged(ErrorInt).KV{
.key = gop.key_ptr.*,
.value = gop.value_ptr.*,
};
}
pub fn analyzeExport(
@@ -3712,8 +3704,8 @@ pub fn analyzeExport(
else => return mod.fail(scope, src, "unable to export type '{}'", .{exported_decl.ty}),
}
try mod.decl_exports.ensureCapacity(mod.gpa, mod.decl_exports.items().len + 1);
try mod.export_owners.ensureCapacity(mod.gpa, mod.export_owners.items().len + 1);
try mod.decl_exports.ensureCapacity(mod.gpa, mod.decl_exports.count() + 1);
try mod.export_owners.ensureCapacity(mod.gpa, mod.export_owners.count() + 1);
const new_export = try mod.gpa.create(Export);
errdefer mod.gpa.destroy(new_export);
@@ -3746,20 +3738,20 @@ pub fn analyzeExport(
// Add to export_owners table.
const eo_gop = mod.export_owners.getOrPutAssumeCapacity(owner_decl);
if (!eo_gop.found_existing) {
eo_gop.entry.value = &[0]*Export{};
eo_gop.value_ptr.* = &[0]*Export{};
}
eo_gop.entry.value = try mod.gpa.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1);
eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export;
errdefer eo_gop.entry.value = mod.gpa.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1);
eo_gop.value_ptr.* = try mod.gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1);
eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export;
errdefer eo_gop.value_ptr.* = mod.gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1);
// Add to exported_decl table.
const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl);
if (!de_gop.found_existing) {
de_gop.entry.value = &[0]*Export{};
de_gop.value_ptr.* = &[0]*Export{};
}
de_gop.entry.value = try mod.gpa.realloc(de_gop.entry.value, de_gop.entry.value.len + 1);
de_gop.entry.value[de_gop.entry.value.len - 1] = new_export;
errdefer de_gop.entry.value = mod.gpa.shrink(de_gop.entry.value, de_gop.entry.value.len - 1);
de_gop.value_ptr.* = try mod.gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1);
de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export;
errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
}
pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst {
const const_inst = try arena.create(ir.Inst.Constant);
@@ -3851,7 +3843,7 @@ pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, b
pub fn deleteAnonDecl(mod: *Module, scope: *Scope, decl: *Decl) void {
const scope_decl = scope.ownerDecl().?;
scope_decl.namespace.anon_decls.swapRemoveAssertDiscard(decl);
assert(scope_decl.namespace.anon_decls.swapRemove(decl));
decl.destroy(mod);
}
@@ -4001,8 +3993,8 @@ pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) In
{
errdefer err_msg.destroy(mod.gpa);
try mod.failed_decls.ensureCapacity(mod.gpa, mod.failed_decls.items().len + 1);
try mod.failed_files.ensureCapacity(mod.gpa, mod.failed_files.items().len + 1);
try mod.failed_decls.ensureCapacity(mod.gpa, mod.failed_decls.count() + 1);
try mod.failed_files.ensureCapacity(mod.gpa, mod.failed_files.count() + 1);
}
switch (scope.tag) {
.block => {
@@ -4420,8 +4412,8 @@ fn lockAndClearFileCompileError(mod: *Module, file: *Scope.File) void {
.never_loaded, .parse_failure, .astgen_failure => {
const lock = mod.comp.mutex.acquire();
defer lock.release();
if (mod.failed_files.swapRemove(file)) |entry| {
if (entry.value) |msg| msg.destroy(mod.gpa); // Delete previous error message.
if (mod.failed_files.fetchSwapRemove(file)) |kv| {
if (kv.value) |msg| msg.destroy(mod.gpa); // Delete previous error message.
}
},
}
@@ -4649,7 +4641,7 @@ pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void {
const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
assert(!gop.found_existing);
gop.entry.value = .{
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = Value.initTag(.abi_align_default),
.default_val = Value.initTag(.unreachable_value),
@@ -4663,7 +4655,7 @@ pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void {
// TODO: if we need to report an error here, use a source location
// that points to this alignment expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
gop.entry.value.abi_align = (try sema.resolveInstConst(&block, src, align_ref)).val;
gop.value_ptr.abi_align = (try sema.resolveInstConst(&block, src, align_ref)).val;
}
if (has_default) {
const default_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
@@ -4671,7 +4663,7 @@ pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void {
// TODO: if we need to report an error here, use a source location
// that points to this default value expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
gop.entry.value.default_val = (try sema.resolveInstConst(&block, src, default_ref)).val;
gop.value_ptr.default_val = (try sema.resolveInstConst(&block, src, default_ref)).val;
}
}
}
@@ -4816,7 +4808,7 @@ pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void {
const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
assert(!gop.found_existing);
gop.entry.value = .{
gop.value_ptr.* = .{
.ty = field_ty,
.abi_align = Value.initTag(.abi_align_default),
};
@@ -4825,7 +4817,7 @@ pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void {
// TODO: if we need to report an error here, use a source location
// that points to this alignment expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
gop.entry.value.abi_align = (try sema.resolveInstConst(&block, src, align_ref)).val;
gop.value_ptr.abi_align = (try sema.resolveInstConst(&block, src, align_ref)).val;
}
}
@@ -4841,9 +4833,7 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
// deleted Decl pointers in the work queue.
var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa);
defer outdated_decls.deinit();
for (mod.import_table.items()) |import_table_entry| {
const file = import_table_entry.value;
for (mod.import_table.values()) |file| {
try outdated_decls.ensureUnusedCapacity(file.outdated_decls.items.len);
for (file.outdated_decls.items) |decl| {
outdated_decls.putAssumeCapacity(decl, {});
@@ -4872,8 +4862,8 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
}
// Finally we can queue up re-analysis tasks after we have processed
// the deleted decls.
for (outdated_decls.items()) |entry| {
try mod.markOutdatedDecl(entry.key);
for (outdated_decls.keys()) |key| {
try mod.markOutdatedDecl(key);
}
}
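With keys and values stored in separate arrays, items() is replaced by the keys() and values() slices whenever only one side is needed, as the two loops above show. A sketch of both accessors (contents illustrative):

const std = @import("std");
const assert = std.debug.assert;

test "keys() and values() are plain slices" {
    var map = std.AutoArrayHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    try map.put(10, 1);
    try map.put(20, 2);

    var key_sum: u32 = 0;
    for (map.keys()) |k| key_sum += k;
    assert(key_sum == 30);

    // values() is mutable storage; writes land in the map itself.
    for (map.values()) |*v| v.* *= 10;
    assert(map.get(20).? == 20);
}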
@@ -4886,9 +4876,10 @@ pub fn processExports(mod: *Module) !void {
var symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{};
defer symbol_exports.deinit(gpa);
for (mod.decl_exports.items()) |entry| {
const exported_decl = entry.key;
const exports = entry.value;
var it = mod.decl_exports.iterator();
while (it.next()) |entry| {
const exported_decl = entry.key_ptr.*;
const exports = entry.value_ptr.*;
for (exports) |new_export| {
const gop = try symbol_exports.getOrPut(gpa, new_export.options.name);
if (gop.found_existing) {
@@ -4899,13 +4890,13 @@ pub fn processExports(mod: *Module) !void {
new_export.options.name,
});
errdefer msg.destroy(gpa);
const other_export = gop.entry.value;
const other_export = gop.value_ptr.*;
const other_src_loc = other_export.getSrcLoc();
try mod.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{});
mod.failed_exports.putAssumeCapacityNoClobber(new_export, msg);
new_export.status = .failed;
} else {
gop.entry.value = new_export;
gop.value_ptr.* = new_export;
}
}
mod.comp.bin_file.updateDeclExports(mod, exported_decl, exports) catch |err| switch (err) {
+3 -3
@@ -100,9 +100,9 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
}
{
var it = pkg.table.iterator();
while (it.next()) |kv| {
gpa.free(kv.key);
var it = pkg.table.keyIterator();
while (it.next()) |key| {
gpa.free(key.*);
}
}
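Unordered std.HashMap has no contiguous keys()/values() slices, so the commit adds keyIterator() and valueIterator(), yielding *K and *V; the Package table above frees each owned key through one. A sketch with hypothetical string keys:

const std = @import("std");
const assert = std.debug.assert;

test "keyIterator walks keys without touching values" {
    var set = std.StringHashMap(void).init(std.testing.allocator);
    defer set.deinit();

    try set.put("a", {});
    try set.put("bb", {});

    var total_len: usize = 0;
    var it = set.keyIterator();
    while (it.next()) |key| {
        // key is a *[]const u8 pointing into the map's key storage.
        total_len += key.len;
    }
    assert(total_len == 3);
}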
+23 -24
@@ -1350,7 +1350,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind
};
// Maps field index to field_ptr index of where it was already initialized.
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.entries.items.len);
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count());
defer gpa.free(found_fields);
mem.set(Zir.Inst.Index, found_fields, 0);
@@ -1382,7 +1382,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind
for (found_fields) |field_ptr, i| {
if (field_ptr != 0) continue;
const field_name = struct_obj.fields.entries.items[i].key;
const field_name = struct_obj.fields.keys()[i];
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
@@ -1687,7 +1687,7 @@ fn zirCompileLog(
const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, sema.owner_decl);
if (!gop.found_existing) {
gop.entry.value = src_node;
gop.value_ptr.* = src_node;
}
return sema.mod.constInst(sema.arena, src, .{
.ty = Type.initTag(.void),
@@ -1954,7 +1954,7 @@ fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
const section_index = struct_obj.fields.getIndex("section").?;
const export_name = try fields[name_index].toAllocatedBytes(sema.arena);
const linkage = fields[linkage_index].toEnum(
struct_obj.fields.items()[linkage_index].value.ty,
struct_obj.fields.values()[linkage_index].ty,
std.builtin.GlobalLinkage,
);
@@ -2426,12 +2426,12 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
const src = inst_data.src();
// Create an anonymous error set type with only this error value, and return the value.
const entry = try sema.mod.getErrorValue(inst_data.get(sema.code));
const result_type = try Type.Tag.error_set_single.create(sema.arena, entry.key);
const kv = try sema.mod.getErrorValue(inst_data.get(sema.code));
const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key);
return sema.mod.constInst(sema.arena, src, .{
.ty = result_type,
.val = try Value.Tag.@"error".create(sema.arena, .{
.name = entry.key,
.name = kv.key,
}),
});
}
@@ -2558,10 +2558,10 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
}
const new_names = try sema.arena.alloc([]const u8, set.count());
var it = set.iterator();
var it = set.keyIterator();
var i: usize = 0;
while (it.next()) |entry| : (i += 1) {
new_names[i] = entry.key;
while (it.next()) |key| : (i += 1) {
new_names[i] = key.*;
}
const new_error_set = try sema.arena.create(Module.ErrorSet);
@@ -2636,7 +2636,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
.enum_full => {
const enum_full = enum_tag.ty.castTag(.enum_full).?.data;
if (enum_full.values.count() != 0) {
const val = enum_full.values.entries.items[field_index].key;
const val = enum_full.values.keys()[field_index];
return mod.constInst(arena, src, .{
.ty = int_tag_ty,
.val = val,
@@ -4360,7 +4360,7 @@ fn validateSwitchItemBool(
}
}
const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.hash, Value.eql, std.hash_map.DefaultMaxLoadPercentage);
const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage);
fn validateSwitchItemSparse(
sema: *Sema,
@@ -4371,8 +4371,8 @@ fn validateSwitchItemSparse(
switch_prong_src: Module.SwitchProngSrc,
) InnerError!void {
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
const entry = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
return sema.validateSwitchDupe(block, entry.value, switch_prong_src, src_node_offset);
const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
}
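hash and eql now travel in a Context type instead of two bare function parameters, which is why ValueSrcMap above shrinks to std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage). Below is a minimal custom context plus the fetchPut pattern from validateSwitchItemSparse; the Point type is invented for illustration, and note that std.HashMap contexts return u64 hashes (array hash map contexts use u32):

const std = @import("std");
const assert = std.debug.assert;

const Point = struct { x: i32, y: i32 };

const PointContext = struct {
    pub fn hash(self: @This(), p: Point) u64 {
        _ = self;
        return std.hash.Wyhash.hash(0, std.mem.asBytes(&p));
    }
    pub fn eql(self: @This(), a: Point, b: Point) bool {
        _ = self;
        return a.x == b.x and a.y == b.y;
    }
};

test "context-based HashMap with fetchPut" {
    var map = std.HashMap(Point, u8, PointContext, std.hash_map.default_max_load_percentage)
        .init(std.testing.allocator);
    defer map.deinit();

    assert((try map.fetchPut(.{ .x = 1, .y = 2 }, 'a')) == null);
    // fetchPut returns the clobbered KV when the key was already present.
    const old = (try map.fetchPut(.{ .x = 1, .y = 2 }, 'b')).?;
    assert(old.value == 'a');
}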
fn validateSwitchNoRange(
@@ -5470,12 +5470,12 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
// Maps field index to field_type index of where it was already initialized.
// For making sure all fields are accounted for and no fields are duplicated.
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.entries.items.len);
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count());
defer gpa.free(found_fields);
mem.set(Zir.Inst.Index, found_fields, 0);
// The init values to use for the struct instance.
const field_inits = try gpa.alloc(*ir.Inst, struct_obj.fields.entries.items.len);
const field_inits = try gpa.alloc(*ir.Inst, struct_obj.fields.count());
defer gpa.free(field_inits);
var field_i: u32 = 0;
@@ -5513,9 +5513,9 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
if (field_type_inst != 0) continue;
// Check if the field has a default init.
const field = struct_obj.fields.entries.items[i].value;
const field = struct_obj.fields.values()[i];
if (field.default_val.tag() == .unreachable_value) {
const field_name = struct_obj.fields.entries.items[i].key;
const field_name = struct_obj.fields.keys()[i];
const template = "missing struct field: {s}";
const args = .{field_name};
if (root_msg) |msg| {
@@ -6402,7 +6402,7 @@ fn analyzeStructFieldPtr(
const field_index = struct_obj.fields.getIndex(field_name) orelse
return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name);
const field = struct_obj.fields.entries.items[field_index].value;
const field = struct_obj.fields.values()[field_index];
const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One);
if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
@@ -6438,7 +6438,7 @@ fn analyzeUnionFieldPtr(
const field_index = union_obj.fields.getIndex(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name);
const field = union_obj.fields.entries.items[field_index].value;
const field = union_obj.fields.values()[field_index];
const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One);
if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| {
@@ -7476,9 +7476,8 @@ fn typeHasOnePossibleValue(
.@"struct" => {
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const s = resolved_ty.castTag(.@"struct").?.data;
for (s.fields.entries.items) |entry| {
const field_ty = entry.value.ty;
if ((try sema.typeHasOnePossibleValue(block, src, field_ty)) == null) {
for (s.fields.values()) |value| {
if ((try sema.typeHasOnePossibleValue(block, src, value.ty)) == null) {
return null;
}
}
@@ -7488,7 +7487,7 @@ fn typeHasOnePossibleValue(
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const enum_full = resolved_ty.castTag(.enum_full).?.data;
if (enum_full.fields.count() == 1) {
return enum_full.values.entries.items[0].key;
return enum_full.values.keys()[0];
} else {
return null;
}
+4 -3
@@ -696,10 +696,11 @@ const DumpTzir = struct {
std.debug.print("Module.Function(name={s}):\n", .{dtz.module_fn.owner_decl.name});
for (dtz.const_table.items()) |entry| {
const constant = entry.key.castTag(.constant).?;
var it = dtz.const_table.iterator();
while (it.next()) |entry| {
const constant = entry.key_ptr.*.castTag(.constant).?;
try writer.print(" @{d}: {} = {};\n", .{
entry.value, constant.base.ty, constant.val,
entry.value_ptr.*, constant.base.ty, constant.val,
});
}
+35 -26
@@ -794,7 +794,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table;
try table.ensureCapacity(self.gpa, table.items().len + additional_count);
try table.ensureCapacity(self.gpa, table.count() + additional_count);
}
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
@@ -808,12 +808,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
if (!gop.found_existing) {
gop.entry.value = .{
gop.value_ptr.* = .{
.off = undefined,
.relocs = .{},
};
}
try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
try gop.value_ptr.relocs.append(self.gpa, @intCast(u32, index));
},
.none => {},
}
@@ -2877,58 +2877,67 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// assert that parent_branch.free_registers equals the saved_then_branch.free_registers
// rather than assigning it.
const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2];
try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.items().len +
else_branch.inst_table.items().len);
for (else_branch.inst_table.items()) |else_entry| {
const canon_mcv = if (saved_then_branch.inst_table.swapRemove(else_entry.key)) |then_entry| blk: {
try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() +
else_branch.inst_table.count());
const else_slice = else_branch.inst_table.entries.slice();
const else_keys = else_slice.items(.key);
const else_values = else_slice.items(.value);
for (else_keys) |else_key, else_idx| {
const else_value = else_values[else_idx];
const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
// The instruction's MCValue is overridden in both branches.
parent_branch.inst_table.putAssumeCapacity(else_entry.key, then_entry.value);
if (else_entry.value == .dead) {
parent_branch.inst_table.putAssumeCapacity(else_key, then_entry.value);
if (else_value == .dead) {
assert(then_entry.value == .dead);
continue;
}
break :blk then_entry.value;
} else blk: {
if (else_entry.value == .dead)
if (else_value == .dead)
continue;
// The instruction is only overridden in the else branch.
var i: usize = self.branch_stack.items.len - 2;
while (true) {
i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead?
if (self.branch_stack.items[i].inst_table.get(else_entry.key)) |mcv| {
if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| {
assert(mcv != .dead);
break :blk mcv;
}
}
};
log.debug("consolidating else_entry {*} {}=>{}", .{ else_entry.key, else_entry.value, canon_mcv });
log.debug("consolidating else_entry {*} {}=>{}", .{ else_key, else_value, canon_mcv });
// TODO make sure the destination stack offset / register does not already have something
// going on there.
try self.setRegOrMem(inst.base.src, else_entry.key.ty, canon_mcv, else_entry.value);
try self.setRegOrMem(inst.base.src, else_key.ty, canon_mcv, else_value);
// TODO track the new register / stack allocation
}
try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.items().len +
saved_then_branch.inst_table.items().len);
for (saved_then_branch.inst_table.items()) |then_entry| {
try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() +
saved_then_branch.inst_table.count());
const then_slice = saved_then_branch.inst_table.entries.slice();
const then_keys = then_slice.items(.key);
const then_values = then_slice.items(.value);
for (then_keys) |then_key, then_idx| {
const then_value = then_values[then_idx];
// We already deleted the items from this table that matched the else_branch.
// So these are all instructions that are only overridden in the then branch.
parent_branch.inst_table.putAssumeCapacity(then_entry.key, then_entry.value);
if (then_entry.value == .dead)
parent_branch.inst_table.putAssumeCapacity(then_key, then_value);
if (then_value == .dead)
continue;
const parent_mcv = blk: {
var i: usize = self.branch_stack.items.len - 2;
while (true) {
i -= 1;
if (self.branch_stack.items[i].inst_table.get(then_entry.key)) |mcv| {
if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| {
assert(mcv != .dead);
break :blk mcv;
}
}
};
log.debug("consolidating then_entry {*} {}=>{}", .{ then_entry.key, parent_mcv, then_entry.value });
log.debug("consolidating then_entry {*} {}=>{}", .{ then_key, parent_mcv, then_value });
// TODO make sure the destination stack offset / register does not already have something
// going on there.
try self.setRegOrMem(inst.base.src, then_entry.key.ty, parent_mcv, then_entry.value);
try self.setRegOrMem(inst.base.src, then_key.ty, parent_mcv, then_value);
// TODO track the new register / stack allocation
}
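Loops that need both columns at once, like the branch consolidation above, can take one entries.slice() and index the parallel .items(.key) and .items(.value) arrays in lockstep instead of re-fetching per element. A sketch of the same access pattern, assuming the public entries field on the unmanaged type that the code above relies on:

const std = @import("std");
const assert = std.debug.assert;

test "parallel key/value columns via entries.slice()" {
    const gpa = std.testing.allocator;
    var map = std.AutoArrayHashMapUnmanaged(u32, u32){};
    defer map.deinit(gpa);

    try map.put(gpa, 1, 100);
    try map.put(gpa, 2, 200);

    // One slice() captures both columns; indices stay in lockstep.
    const slice = map.entries.slice();
    const keys = slice.items(.key);
    const values = slice.items(.value);
    for (keys) |k, i| {
        assert(values[i] == k * 100);
    }
}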
@@ -3028,7 +3037,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// block results.
.mcv = MCValue{ .none = {} },
});
const block_data = &self.blocks.getEntry(inst).?.value;
const block_data = self.blocks.getPtr(inst).?;
defer block_data.relocs.deinit(self.gpa);
try self.genBody(inst.body);
@@ -3109,7 +3118,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn br(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block, operand: *ir.Inst) !MCValue {
const block_data = &self.blocks.getEntry(block).?.value;
const block_data = self.blocks.getPtr(block).?;
if (operand.ty.hasCodeGenBits()) {
const operand_mcv = try self.resolveInst(operand);
@@ -3124,7 +3133,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn brVoid(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block) !MCValue {
const block_data = &self.blocks.getEntry(block).?.value;
const block_data = self.blocks.getPtr(block).?;
// Emit a jump with a relocation. It will be patched up after the block ends.
try block_data.relocs.ensureCapacity(self.gpa, block_data.relocs.items.len + 1);
@@ -4118,9 +4127,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const branch = &self.branch_stack.items[0];
const gop = try branch.inst_table.getOrPut(self.gpa, inst);
if (!gop.found_existing) {
gop.entry.value = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val });
gop.value_ptr.* = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val });
}
return gop.entry.value;
return gop.value_ptr.*;
}
return self.getResolvedInstValue(inst);
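The block lookups above use the new getPtr(), which returns an optional pointer straight to the value slot and replaces the old &map.getEntry(key).?.value detour. A sketch:

const std = @import("std");
const assert = std.debug.assert;

test "getPtr returns a pointer into the map" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    try map.put(7, 1);

    // Mutate in place through the pointer; note it is invalidated by
    // any later call that may grow or rehash the map.
    if (map.getPtr(7)) |value_ptr| {
        value_ptr.* += 41;
    }
    assert(map.get(7).? == 42);
}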
+10 -7
@@ -39,7 +39,7 @@ const BlockData = struct {
};
pub const CValueMap = std.AutoHashMap(*Inst, CValue);
pub const TypedefMap = std.HashMap(Type, struct { name: []const u8, rendered: []u8 }, Type.hash, Type.eql, std.hash_map.default_max_load_percentage);
pub const TypedefMap = std.HashMap(Type, struct { name: []const u8, rendered: []u8 }, Type.HashContext, std.hash_map.default_max_load_percentage);
fn formatTypeAsCIdentifier(
data: Type,
@@ -309,7 +309,7 @@ pub const DeclGen = struct {
.enum_full, .enum_nonexhaustive => {
const enum_full = t.cast(Type.Payload.EnumFull).?.data;
if (enum_full.values.count() != 0) {
const tag_val = enum_full.values.entries.items[field_index].key;
const tag_val = enum_full.values.keys()[field_index];
return dg.renderValue(writer, enum_full.tag_ty, tag_val);
} else {
return writer.print("{d}", .{field_index});
@@ -493,10 +493,13 @@ pub const DeclGen = struct {
defer buffer.deinit();
try buffer.appendSlice("typedef struct {\n");
for (struct_obj.fields.entries.items) |entry| {
try buffer.append(' ');
try dg.renderType(buffer.writer(), entry.value.ty);
try buffer.writer().print(" {s};\n", .{fmtIdent(entry.key)});
{
var it = struct_obj.fields.iterator();
while (it.next()) |entry| {
try buffer.append(' ');
try dg.renderType(buffer.writer(), entry.value_ptr.ty);
try buffer.writer().print(" {s};\n", .{fmtIdent(entry.key_ptr.*)});
}
}
try buffer.appendSlice("} ");
@@ -1186,7 +1189,7 @@ fn genStructFieldPtr(o: *Object, inst: *Inst.StructFieldPtr) !CValue {
const writer = o.writer();
const struct_ptr = try o.resolveInst(inst.struct_ptr);
const struct_obj = inst.struct_ptr.ty.elemType().castTag(.@"struct").?.data;
const field_name = struct_obj.fields.entries.items[inst.field_index].key;
const field_name = struct_obj.fields.keys()[inst.field_index];
const local = try o.allocLocal(inst.base.ty, .Const);
switch (struct_ptr) {
+1 -1
@@ -789,7 +789,7 @@ pub const FuncGen = struct {
.break_vals = &break_vals,
});
defer {
self.blocks.removeAssertDiscard(inst);
assert(self.blocks.remove(inst));
break_bbs.deinit(self.gpa());
break_vals.deinit(self.gpa());
}
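removeAssertDiscard is deleted; since remove() now returns whether anything was removed, the same guarantee is spelled assert(map.remove(key)), as in the defer above. A sketch:

const std = @import("std");
const assert = std.debug.assert;

test "remove reports success instead of asserting internally" {
    var map = std.AutoHashMap(u32, void).init(std.testing.allocator);
    defer map.deinit();

    try map.put(5, {});

    // Old: map.removeAssertDiscard(5). New: assert on the returned bool.
    assert(map.remove(5));
    assert(!map.remove(5)); // A missing key is now observable, not a panic.
}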
+7 -6
@@ -2,6 +2,7 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const Target = std.Target;
const log = std.log.scoped(.codegen);
const assert = std.debug.assert;
const spec = @import("spirv/spec.zig");
const Opcode = spec.Opcode;
@@ -17,7 +18,7 @@ const Inst = ir.Inst;
pub const Word = u32;
pub const ResultId = u32;
pub const TypeMap = std.HashMap(Type, ResultId, Type.hash, Type.eql, std.hash_map.default_max_load_percentage);
pub const TypeMap = std.HashMap(Type, u32, Type.HashContext, std.hash_map.default_max_load_percentage);
pub const InstMap = std.AutoHashMap(*Inst, ResultId);
const IncomingBlock = struct {
@@ -141,16 +142,16 @@ pub const SPIRVModule = struct {
const path = decl.namespace.file_scope.sub_file_path;
const result = try self.file_names.getOrPut(path);
if (!result.found_existing) {
result.entry.value = self.allocResultId();
try writeInstructionWithString(&self.binary.debug_strings, .OpString, &[_]Word{result.entry.value}, path);
result.value_ptr.* = self.allocResultId();
try writeInstructionWithString(&self.binary.debug_strings, .OpString, &[_]Word{result.value_ptr.*}, path);
try writeInstruction(&self.binary.debug_strings, .OpSource, &[_]Word{
@enumToInt(spec.SourceLanguage.Unknown), // TODO: Register Zig source language.
0, // TODO: Zig version as u32?
result.entry.value,
result.value_ptr.*,
});
}
return result.entry.value;
return result.value_ptr.*;
}
};
@@ -847,7 +848,7 @@ pub const DeclGen = struct {
.incoming_blocks = &incoming_blocks,
});
defer {
self.blocks.removeAssertDiscard(inst);
assert(self.blocks.remove(inst));
incoming_blocks.deinit(self.spv.gpa);
}
+3 -3
@@ -625,10 +625,10 @@ pub const Context = struct {
const struct_data: *Module.Struct = ty.castTag(.@"struct").?.data;
const fields_len = @intCast(u32, struct_data.fields.count());
try self.locals.ensureCapacity(self.gpa, self.locals.items.len + fields_len);
for (struct_data.fields.items()) |entry| {
for (struct_data.fields.values()) |*value| {
const val_type = try self.genValtype(
.{ .node_offset = struct_data.node_offset },
entry.value.ty,
value.ty,
);
self.locals.appendAssumeCapacity(val_type);
self.local_index += 1;
@@ -1018,7 +1018,7 @@ pub const Context = struct {
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
if (enum_full.values.count() != 0) {
const tag_val = enum_full.values.entries.items[field_index.data].key;
const tag_val = enum_full.values.keys()[field_index.data];
try self.emitConstant(src, tag_val, enum_full.tag_ty);
} else {
try writer.writeByte(wasm.opcode(.i32_const));
+2 -2
@@ -252,7 +252,7 @@ pub const LibCInstallation = struct {
// Detect infinite loops.
const inf_loop_env_key = "ZIG_IS_DETECTING_LIBC_PATHS";
if (env_map.get(inf_loop_env_key) != null) return error.ZigIsTheCCompiler;
try env_map.set(inf_loop_env_key, "1");
try env_map.put(inf_loop_env_key, "1");
const exec_res = std.ChildProcess.exec(.{
.allocator = allocator,
@@ -564,7 +564,7 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 {
// Detect infinite loops.
const inf_loop_env_key = "ZIG_IS_DETECTING_LIBC_PATHS";
if (env_map.get(inf_loop_env_key) != null) return error.ZigIsTheCCompiler;
try env_map.set(inf_loop_env_key, "1");
try env_map.put(inf_loop_env_key, "1");
const exec_res = std.ChildProcess.exec(.{
.allocator = allocator,
+8 -8
@@ -162,7 +162,7 @@ pub const File = struct {
};
/// For DWARF .debug_info.
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.HashContext, std.hash_map.default_max_load_percentage);
/// For DWARF .debug_info.
pub const DbgInfoTypeReloc = struct {
@@ -406,8 +406,8 @@ pub const File = struct {
const full_out_path = try emit.directory.join(comp.gpa, &[_][]const u8{emit.sub_path});
defer comp.gpa.free(full_out_path);
assert(comp.c_object_table.count() == 1);
const the_entry = comp.c_object_table.items()[0];
const cached_pp_file_path = the_entry.key.status.success.object_path;
const the_key = comp.c_object_table.keys()[0];
const cached_pp_file_path = the_key.status.success.object_path;
try fs.cwd().copyFile(cached_pp_file_path, fs.cwd(), full_out_path, .{});
return;
}
@@ -545,8 +545,8 @@ pub const File = struct {
base.releaseLock();
try man.addListOfFiles(base.options.objects);
for (comp.c_object_table.items()) |entry| {
_ = try man.addFile(entry.key.status.success.object_path, null);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
try man.addOptionalFile(compiler_rt_path);
@@ -580,12 +580,12 @@ pub const File = struct {
var object_files = std.ArrayList([*:0]const u8).init(base.allocator);
defer object_files.deinit();
try object_files.ensureCapacity(base.options.objects.len + comp.c_object_table.items().len + 2);
try object_files.ensureCapacity(base.options.objects.len + comp.c_object_table.count() + 2);
for (base.options.objects) |obj_path| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, obj_path));
}
for (comp.c_object_table.items()) |entry| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, entry.key.status.success.object_path));
for (comp.c_object_table.keys()) |key| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, key.status.success.object_path));
}
if (module_obj_path) |p| {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
+27 -26
@@ -70,8 +70,8 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
}
pub fn deinit(self: *C) void {
for (self.decl_table.items()) |entry| {
self.freeDecl(entry.key);
for (self.decl_table.keys()) |key| {
deinitDecl(self.base.allocator, key);
}
self.decl_table.deinit(self.base.allocator);
}
@@ -80,13 +80,17 @@ pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {}
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
_ = self.decl_table.swapRemove(decl);
decl.link.c.code.deinit(self.base.allocator);
decl.fn_link.c.fwd_decl.deinit(self.base.allocator);
var it = decl.fn_link.c.typedefs.iterator();
while (it.next()) |some| {
self.base.allocator.free(some.value.rendered);
deinitDecl(self.base.allocator, decl);
}
fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void {
decl.link.c.code.deinit(gpa);
decl.fn_link.c.fwd_decl.deinit(gpa);
var it = decl.fn_link.c.typedefs.valueIterator();
while (it.next()) |value| {
gpa.free(value.rendered);
}
decl.fn_link.c.typedefs.deinit(self.base.allocator);
decl.fn_link.c.typedefs.deinit(gpa);
}
pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
@@ -101,9 +105,9 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
const code = &decl.link.c.code;
fwd_decl.shrinkRetainingCapacity(0);
{
var it = typedefs.iterator();
while (it.next()) |entry| {
module.gpa.free(entry.value.rendered);
var it = typedefs.valueIterator();
while (it.next()) |value| {
module.gpa.free(value.rendered);
}
}
typedefs.clearRetainingCapacity();
@@ -128,9 +132,9 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
object.blocks.deinit(module.gpa);
object.code.deinit();
object.dg.fwd_decl.deinit();
var it = object.dg.typedefs.iterator();
while (it.next()) |some| {
module.gpa.free(some.value.rendered);
var it = object.dg.typedefs.valueIterator();
while (it.next()) |value| {
module.gpa.free(value.rendered);
}
object.dg.typedefs.deinit();
}
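Iterator entries are now { key_ptr, value_ptr } pointer pairs rather than copies, and value-only walks can use valueIterator(), which the typedef teardown above uses to free each rendered string. A sketch of both shapes (the heap-owned values are illustrative):

const std = @import("std");
const assert = std.debug.assert;

test "iterator entries are pointer pairs" {
    const gpa = std.testing.allocator;
    var map = std.AutoHashMap(u32, []u8).init(gpa);
    defer map.deinit();

    try map.put(1, try gpa.dupe(u8, "one"));
    try map.put(2, try gpa.dupe(u8, "two"));

    // Full iteration: dereference key_ptr/value_ptr explicitly.
    var it = map.iterator();
    while (it.next()) |entry| {
        assert(entry.key_ptr.* == 1 or entry.key_ptr.* == 2);
    }

    // Value-only cleanup, mirroring the typedefs teardown above.
    var vit = map.valueIterator();
    while (vit.next()) |value| {
        gpa.free(value.*);
    }
}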
@@ -194,31 +198,30 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
if (module.global_error_set.size == 0) break :render_errors;
var it = module.global_error_set.iterator();
while (it.next()) |entry| {
try err_typedef_writer.print("#define zig_error_{s} {d}\n", .{ entry.key, entry.value });
try err_typedef_writer.print("#define zig_error_{s} {d}\n", .{ entry.key_ptr.*, entry.value_ptr.* });
}
try err_typedef_writer.writeByte('\n');
}
var fn_count: usize = 0;
var typedefs = std.HashMap(Type, []const u8, Type.hash, Type.eql, std.hash_map.default_max_load_percentage).init(comp.gpa);
var typedefs = std.HashMap(Type, []const u8, Type.HashContext, std.hash_map.default_max_load_percentage).init(comp.gpa);
defer typedefs.deinit();
// Typedefs, forward decls and non-functions first.
// TODO: performance investigation: would keeping a list of Decls that we should
// generate, rather than querying here, be faster?
for (self.decl_table.items()) |kv| {
const decl = kv.key;
for (self.decl_table.keys()) |decl| {
if (!decl.has_tv) continue;
const buf = buf: {
if (decl.val.castTag(.function)) |_| {
var it = decl.fn_link.c.typedefs.iterator();
while (it.next()) |new| {
if (typedefs.get(new.key)) |previous| {
try err_typedef_writer.print("typedef {s} {s};\n", .{ previous, new.value.name });
if (typedefs.get(new.key_ptr.*)) |previous| {
try err_typedef_writer.print("typedef {s} {s};\n", .{ previous, new.value_ptr.name });
} else {
try typedefs.ensureCapacity(typedefs.capacity() + 1);
try err_typedef_writer.writeAll(new.value.rendered);
typedefs.putAssumeCapacityNoClobber(new.key, new.value.name);
try err_typedef_writer.writeAll(new.value_ptr.rendered);
typedefs.putAssumeCapacityNoClobber(new.key_ptr.*, new.value_ptr.name);
}
}
fn_count += 1;
@@ -242,8 +245,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
// Now the function bodies.
try all_buffers.ensureCapacity(all_buffers.items.len + fn_count);
for (self.decl_table.items()) |kv| {
const decl = kv.key;
for (self.decl_table.keys()) |decl| {
if (!decl.has_tv) continue;
if (decl.val.castTag(.function)) |_| {
const buf = decl.link.c.code.items;
@@ -278,8 +280,7 @@ pub fn flushEmitH(module: *Module) !void {
.iov_len = zig_h.len,
});
for (emit_h.decl_table.items()) |kv| {
const decl = kv.key;
for (emit_h.decl_table.keys()) |decl| {
const decl_emit_h = decl.getEmitH(module);
const buf = decl_emit_h.fwd_decl.items;
all_buffers.appendAssumeCapacity(.{
+9 -9
@@ -735,7 +735,7 @@ pub fn updateDeclExports(self: *Coff, module: *Module, decl: *Module.Decl, expor
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -746,7 +746,7 @@ pub fn updateDeclExports(self: *Coff, module: *Module, decl: *Module.Decl, expor
if (mem.eql(u8, exp.options.name, "_start")) {
self.entry_addr = decl.link.coff.getVAddr(self.*) - default_image_base;
} else {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: Exports other than '_start'", .{}),
@@ -861,8 +861,8 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
self.base.releaseLock();
try man.addListOfFiles(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
_ = try man.addFile(entry.key.status.success.object_path, null);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
man.hash.addOptional(self.base.options.stack_size_override);
@@ -928,7 +928,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
break :blk self.base.options.objects[0];
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.items()[0].key.status.success.object_path;
break :blk comp.c_object_table.keys()[0].status.success.object_path;
if (module_obj_path) |p|
break :blk p;
@@ -1026,8 +1026,8 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
try argv.appendSlice(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
try argv.append(entry.key.status.success.object_path);
for (comp.c_object_table.keys()) |key| {
try argv.append(key.status.success.object_path);
}
if (module_obj_path) |p| {
@@ -1221,8 +1221,8 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
try argv.append(comp.compiler_rt_static_lib.?.full_object_path);
}
for (self.base.options.system_libs.items()) |entry| {
const lib_basename = try allocPrint(arena, "{s}.lib", .{entry.key});
for (self.base.options.system_libs.keys()) |key| {
const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
if (comp.crt_files.get(lib_basename)) |crt_file| {
try argv.append(crt_file.full_object_path);
} else {
+33 -31
@@ -1318,8 +1318,8 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
try man.addOptionalFile(self.base.options.linker_script);
try man.addOptionalFile(self.base.options.version_script);
try man.addListOfFiles(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
_ = try man.addFile(entry.key.status.success.object_path, null);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
try man.addOptionalFile(compiler_rt_path);
@@ -1394,7 +1394,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
break :blk self.base.options.objects[0];
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.items()[0].key.status.success.object_path;
break :blk comp.c_object_table.keys()[0].status.success.object_path;
if (module_obj_path) |p|
break :blk p;
@@ -1518,8 +1518,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
var test_path = std.ArrayList(u8).init(self.base.allocator);
defer test_path.deinit();
for (self.base.options.lib_dirs) |lib_dir_path| {
for (self.base.options.system_libs.items()) |entry| {
const link_lib = entry.key;
for (self.base.options.system_libs.keys()) |link_lib| {
test_path.shrinkRetainingCapacity(0);
const sep = fs.path.sep_str;
try test_path.writer().print("{s}" ++ sep ++ "lib{s}.so", .{ lib_dir_path, link_lib });
@@ -1568,8 +1567,8 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
// Positional arguments to the linker such as object files.
try argv.appendSlice(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
try argv.append(entry.key.status.success.object_path);
for (comp.c_object_table.keys()) |key| {
try argv.append(key.status.success.object_path);
}
if (module_obj_path) |p| {
@@ -1598,10 +1597,9 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
// Shared libraries.
if (is_exe_or_dyn_lib) {
const system_libs = self.base.options.system_libs.items();
const system_libs = self.base.options.system_libs.keys();
try argv.ensureCapacity(argv.items.len + system_libs.len);
for (system_libs) |entry| {
const link_lib = entry.key;
for (system_libs) |link_lib| {
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
// (the check for that needs to be earlier), but they could be full paths to .so files, in which
// case we want to avoid prepending "-l".
@@ -2168,9 +2166,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
defer {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.relocs.deinit(self.base.allocator);
var it = dbg_info_type_relocs.valueIterator();
while (it.next()) |value| {
value.relocs.deinit(self.base.allocator);
}
dbg_info_type_relocs.deinit(self.base.allocator);
}
@@ -2235,12 +2233,12 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
if (fn_ret_has_bits) {
const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type);
if (!gop.found_existing) {
gop.entry.value = .{
gop.value_ptr.* = .{
.off = undefined,
.relocs = .{},
};
}
try gop.entry.value.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len));
try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
}
dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
@@ -2448,24 +2446,28 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.off = @intCast(u32, dbg_info_buffer.items.len);
try self.addDbgInfoType(entry.key, &dbg_info_buffer);
{
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len);
try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer);
}
}
try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len));
// Now that we have the offset assigned we can finally perform type relocations.
it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
for (entry.value.relocs.items) |off| {
mem.writeInt(
u32,
dbg_info_buffer.items[off..][0..4],
text_block.dbg_info_off + entry.value.off,
target_endian,
);
{
// Now that we have the offset assigned we can finally perform type relocations.
var it = dbg_info_type_relocs.valueIterator();
while (it.next()) |value| {
for (value.relocs.items) |off| {
mem.writeInt(
u32,
dbg_info_buffer.items[off..][0..4],
text_block.dbg_info_off + value.off,
target_endian,
);
}
}
}
@@ -2636,7 +2638,7 @@ pub fn updateDeclExports(
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -2654,7 +2656,7 @@ pub fn updateDeclExports(
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
+68 -60
@@ -567,8 +567,8 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
try man.addOptionalFile(self.base.options.linker_script);
try man.addOptionalFile(self.base.options.version_script);
try man.addListOfFiles(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
_ = try man.addFile(entry.key.status.success.object_path, null);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
// We can skip hashing libc and libc++ components that we are in charge of building from Zig
@@ -632,7 +632,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
break :blk self.base.options.objects[0];
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.items()[0].key.status.success.object_path;
break :blk comp.c_object_table.keys()[0].status.success.object_path;
if (module_obj_path) |p|
break :blk p;
@@ -682,8 +682,8 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
try positionals.appendSlice(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
try positionals.append(entry.key.status.success.object_path);
for (comp.c_object_table.keys()) |key| {
try positionals.append(key.status.success.object_path);
}
if (module_obj_path) |p| {
@@ -702,9 +702,8 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
var libs = std.ArrayList([]const u8).init(arena);
var search_lib_names = std.ArrayList([]const u8).init(arena);
const system_libs = self.base.options.system_libs.items();
for (system_libs) |entry| {
const link_lib = entry.key;
const system_libs = self.base.options.system_libs.keys();
for (system_libs) |link_lib| {
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
// (the check for that needs to be earlier), but they could be full paths to .dylib files, in which
// case we want to avoid prepending "-l".
@@ -804,8 +803,8 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
var rpaths = std.ArrayList([]const u8).init(arena);
try rpaths.ensureCapacity(rpath_table.count());
for (rpath_table.items()) |entry| {
rpaths.appendAssumeCapacity(entry.key);
for (rpath_table.keys()) |*key| {
rpaths.appendAssumeCapacity(key.*);
}
if (self.base.options.verbose_link) {
@@ -973,8 +972,8 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
// Positional arguments to the linker such as object files.
try argv.appendSlice(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
try argv.append(entry.key.status.success.object_path);
for (comp.c_object_table.keys()) |key| {
try argv.append(key.status.success.object_path);
}
if (module_obj_path) |p| {
try argv.append(p);
@@ -986,10 +985,9 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
}
// Shared libraries.
const system_libs = self.base.options.system_libs.items();
const system_libs = self.base.options.system_libs.keys();
try argv.ensureCapacity(argv.items.len + system_libs.len);
for (system_libs) |entry| {
const link_lib = entry.key;
for (system_libs) |link_lib| {
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
// (the check for that needs to be earlier), but they could be full paths to .dylib files, in which
// case we want to avoid prepending "-l".
@@ -1153,12 +1151,12 @@ pub fn deinit(self: *MachO) void {
if (self.d_sym) |*ds| {
ds.deinit(self.base.allocator);
}
for (self.lazy_imports.items()) |*entry| {
self.base.allocator.free(entry.key);
for (self.lazy_imports.keys()) |*key| {
self.base.allocator.free(key.*);
}
self.lazy_imports.deinit(self.base.allocator);
for (self.nonlazy_imports.items()) |*entry| {
self.base.allocator.free(entry.key);
for (self.nonlazy_imports.keys()) |*key| {
self.base.allocator.free(key.*);
}
self.nonlazy_imports.deinit(self.base.allocator);
self.pie_fixups.deinit(self.base.allocator);
@@ -1167,9 +1165,9 @@ pub fn deinit(self: *MachO) void {
self.offset_table.deinit(self.base.allocator);
self.offset_table_free_list.deinit(self.base.allocator);
{
var it = self.string_table_directory.iterator();
while (it.next()) |entry| {
self.base.allocator.free(entry.key);
var it = self.string_table_directory.keyIterator();
while (it.next()) |key| {
self.base.allocator.free(key.*);
}
}
self.string_table_directory.deinit(self.base.allocator);
@@ -1318,9 +1316,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
if (debug_buffers) |*dbg| {
dbg.dbg_line_buffer.deinit();
dbg.dbg_info_buffer.deinit();
var it = dbg.dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.relocs.deinit(self.base.allocator);
var it = dbg.dbg_info_type_relocs.valueIterator();
while (it.next()) |value| {
value.relocs.deinit(self.base.allocator);
}
dbg.dbg_info_type_relocs.deinit(self.base.allocator);
}
@@ -1543,7 +1541,7 @@ pub fn updateDeclExports(
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, "__text")) {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
@@ -1578,7 +1576,7 @@ pub fn updateDeclExports(
n_desc |= macho.N_WEAK_DEF;
},
.LinkOnce => {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
@@ -2259,7 +2257,7 @@ pub fn populateMissingMetadata(self: *MachO) !void {
self.load_commands_dirty = true;
}
if (!self.nonlazy_imports.contains("dyld_stub_binder")) {
const index = @intCast(u32, self.nonlazy_imports.items().len);
const index = @intCast(u32, self.nonlazy_imports.count());
const name = try self.base.allocator.dupe(u8, "dyld_stub_binder");
const offset = try self.makeString("dyld_stub_binder");
try self.nonlazy_imports.putNoClobber(self.base.allocator, name, .{
@@ -2440,7 +2438,7 @@ fn updateString(self: *MachO, old_str_off: u32, new_name: []const u8) !u32 {
}
pub fn addExternSymbol(self: *MachO, name: []const u8) !u32 {
const index = @intCast(u32, self.lazy_imports.items().len);
const index = @intCast(u32, self.lazy_imports.count());
const offset = try self.makeString(name);
const sym_name = try self.base.allocator.dupe(u8, name);
const dylib_ordinal = 1; // TODO this is now hardcoded, since we only support libSystem.
@@ -2627,7 +2625,7 @@ fn writeOffsetTableEntry(self: *MachO, index: usize) !void {
break :blk self.locals.items[got_entry.symbol];
},
.Extern => {
break :blk self.nonlazy_imports.items()[got_entry.symbol].value.symbol;
break :blk self.nonlazy_imports.values()[got_entry.symbol].symbol;
},
}
};
@@ -2910,7 +2908,7 @@ fn relocateSymbolTable(self: *MachO) !void {
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
const nlocals = self.locals.items.len;
const nglobals = self.globals.items.len;
const nundefs = self.lazy_imports.items().len + self.nonlazy_imports.items().len;
const nundefs = self.lazy_imports.count() + self.nonlazy_imports.count();
const nsyms = nlocals + nglobals + nundefs;
if (symtab.nsyms < nsyms) {
@@ -2957,15 +2955,15 @@ fn writeAllGlobalAndUndefSymbols(self: *MachO) !void {
const nlocals = self.locals.items.len;
const nglobals = self.globals.items.len;
const nundefs = self.lazy_imports.items().len + self.nonlazy_imports.items().len;
const nundefs = self.lazy_imports.count() + self.nonlazy_imports.count();
var undefs = std.ArrayList(macho.nlist_64).init(self.base.allocator);
defer undefs.deinit();
try undefs.ensureCapacity(nundefs);
for (self.lazy_imports.items()) |entry| {
undefs.appendAssumeCapacity(entry.value.symbol);
for (self.lazy_imports.values()) |*value| {
undefs.appendAssumeCapacity(value.symbol);
}
for (self.nonlazy_imports.items()) |entry| {
undefs.appendAssumeCapacity(entry.value.symbol);
for (self.nonlazy_imports.values()) |*value| {
undefs.appendAssumeCapacity(value.symbol);
}
const locals_off = symtab.symoff;
@@ -3005,10 +3003,10 @@ fn writeIndirectSymbolTable(self: *MachO) !void {
const la_symbol_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?];
const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab;
const lazy = self.lazy_imports.items();
const lazy_count = self.lazy_imports.count();
const got_entries = self.offset_table.items;
const allocated_size = self.allocatedSizeLinkedit(dysymtab.indirectsymoff);
const nindirectsyms = @intCast(u32, lazy.len * 2 + got_entries.len);
const nindirectsyms = @intCast(u32, lazy_count * 2 + got_entries.len);
const needed_size = @intCast(u32, nindirectsyms * @sizeOf(u32));
if (needed_size > allocated_size) {
@@ -3027,12 +3025,15 @@ fn writeIndirectSymbolTable(self: *MachO) !void {
var writer = stream.writer();
stubs.reserved1 = 0;
for (lazy) |_, i| {
const symtab_idx = @intCast(u32, dysymtab.iundefsym + i);
try writer.writeIntLittle(u32, symtab_idx);
{
var i: usize = 0;
while (i < lazy_count) : (i += 1) {
const symtab_idx = @intCast(u32, dysymtab.iundefsym + i);
try writer.writeIntLittle(u32, symtab_idx);
}
}
const base_id = @intCast(u32, lazy.len);
const base_id = @intCast(u32, lazy_count);
got.reserved1 = base_id;
for (got_entries) |entry| {
switch (entry.kind) {
@@ -3047,9 +3048,12 @@ fn writeIndirectSymbolTable(self: *MachO) !void {
}
la_symbol_ptr.reserved1 = got.reserved1 + @intCast(u32, got_entries.len);
for (lazy) |_, i| {
const symtab_idx = @intCast(u32, dysymtab.iundefsym + i);
try writer.writeIntLittle(u32, symtab_idx);
{
var i: usize = 0;
while (i < lazy_count) : (i += 1) {
const symtab_idx = @intCast(u32, dysymtab.iundefsym + i);
try writer.writeIntLittle(u32, symtab_idx);
}
}
try self.base.file.?.pwriteAll(buf, dysymtab.indirectsymoff);
@@ -3183,15 +3187,15 @@ fn writeRebaseInfoTable(self: *MachO) !void {
}
if (self.la_symbol_ptr_section_index) |idx| {
try pointers.ensureCapacity(pointers.items.len + self.lazy_imports.items().len);
try pointers.ensureCapacity(pointers.items.len + self.lazy_imports.count());
const seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
const sect = seg.sections.items[idx];
const base_offset = sect.addr - seg.inner.vmaddr;
const segment_id = self.data_segment_cmd_index.?;
for (self.lazy_imports.items()) |entry| {
for (self.lazy_imports.values()) |*value| {
pointers.appendAssumeCapacity(.{
.offset = base_offset + entry.value.index * @sizeOf(u64),
.offset = base_offset + value.index * @sizeOf(u64),
.segment_id = segment_id,
});
}
@@ -3241,12 +3245,13 @@ fn writeBindingInfoTable(self: *MachO) !void {
for (self.offset_table.items) |entry| {
if (entry.kind == .Local) continue;
const import = self.nonlazy_imports.items()[entry.symbol];
const import_key = self.nonlazy_imports.keys()[entry.symbol];
const import_ordinal = self.nonlazy_imports.values()[entry.symbol].dylib_ordinal;
try pointers.append(.{
.offset = base_offset + entry.index * @sizeOf(u64),
.segment_id = segment_id,
.dylib_ordinal = import.value.dylib_ordinal,
.name = import.key,
.dylib_ordinal = import_ordinal,
.name = import_key,
});
}
}
@@ -3286,18 +3291,21 @@ fn writeLazyBindingInfoTable(self: *MachO) !void {
defer pointers.deinit();
if (self.la_symbol_ptr_section_index) |idx| {
try pointers.ensureCapacity(self.lazy_imports.items().len);
try pointers.ensureCapacity(self.lazy_imports.count());
const seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
const sect = seg.sections.items[idx];
const base_offset = sect.addr - seg.inner.vmaddr;
const segment_id = @intCast(u16, self.data_segment_cmd_index.?);
for (self.lazy_imports.items()) |entry| {
const slice = self.lazy_imports.entries.slice();
const keys = slice.items(.key);
const values = slice.items(.value);
for (keys) |*key, i| {
pointers.appendAssumeCapacity(.{
.offset = base_offset + entry.value.index * @sizeOf(u64),
.offset = base_offset + values[i].index * @sizeOf(u64),
.segment_id = segment_id,
.dylib_ordinal = entry.value.dylib_ordinal,
.name = entry.key,
.dylib_ordinal = values[i].dylib_ordinal,
.name = key.*,
});
}
}
@@ -3329,7 +3337,7 @@ fn writeLazyBindingInfoTable(self: *MachO) !void {
}
fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
if (self.lazy_imports.items().len == 0) return;
if (self.lazy_imports.count() == 0) return;
var stream = std.io.fixedBufferStream(buffer);
var reader = stream.reader();
@@ -3375,7 +3383,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
else => {},
}
}
assert(self.lazy_imports.items().len <= offsets.items.len);
assert(self.lazy_imports.count() <= offsets.items.len);
const stub_size: u4 = switch (self.base.options.target.cpu.arch) {
.x86_64 => 10,
@@ -3388,9 +3396,9 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
else => unreachable,
};
var buf: [@sizeOf(u32)]u8 = undefined;
for (self.lazy_imports.items()) |_, i| {
for (offsets.items[0..self.lazy_imports.count()]) |offset, i| {
const placeholder_off = self.stub_helper_stubs_start_off.? + i * stub_size + off;
mem.writeIntLittle(u32, &buf, offsets.items[i]);
mem.writeIntLittle(u32, &buf, offset);
try self.base.file.?.pwriteAll(&buf, placeholder_off);
}
}
+7 -5
@@ -92,9 +92,11 @@ pub fn init(allocator: *Allocator) Archive {
}
pub fn deinit(self: *Archive) void {
for (self.toc.items()) |*entry| {
self.allocator.free(entry.key);
entry.value.deinit(self.allocator);
for (self.toc.keys()) |*key| {
self.allocator.free(key.*);
}
for (self.toc.values()) |*value| {
value.deinit(self.allocator);
}
self.toc.deinit(self.allocator);
@@ -187,10 +189,10 @@ fn parseTableOfContents(self: *Archive, reader: anytype) !void {
defer if (res.found_existing) self.allocator.free(owned_name);
if (!res.found_existing) {
res.entry.value = .{};
res.value_ptr.* = .{};
}
try res.entry.value.append(self.allocator, object_offset);
try res.value_ptr.append(self.allocator, object_offset);
}
}
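The table-of-contents parse above shows the common lazy-init shape under the new API: getOrPut reserves the slot, value_ptr is default-initialized only when the slot is fresh, and the aggregate is then appended to in place. A standalone sketch (the name-to-offsets multimap is hypothetical):

const std = @import("std");
const assert = std.debug.assert;

test "getOrPut lazily initializes aggregate values" {
    const gpa = std.testing.allocator;
    var toc = std.StringArrayHashMap(std.ArrayListUnmanaged(u32)).init(gpa);
    defer {
        for (toc.values()) |*list| list.deinit(gpa);
        toc.deinit();
    }

    const names = [_][]const u8{ "a", "b", "a" };
    const offsets = [_]u32{ 4, 8, 12 };
    for (names) |name, i| {
        const res = try toc.getOrPut(name);
        if (!res.found_existing) {
            res.value_ptr.* = .{}; // Fresh slot: start with an empty list.
        }
        try res.value_ptr.append(gpa, offsets[i]);
    }

    assert(toc.get("a").?.items.len == 2);
}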
+22 -18
@@ -997,12 +997,12 @@ pub fn initDeclDebugBuffers(
if (fn_ret_has_bits) {
const gop = try dbg_info_type_relocs.getOrPut(allocator, fn_ret_type);
if (!gop.found_existing) {
gop.entry.value = .{
gop.value_ptr.* = .{
.off = undefined,
.relocs = .{},
};
}
try gop.entry.value.relocs.append(allocator, @intCast(u32, dbg_info_buffer.items.len));
try gop.value_ptr.relocs.append(allocator, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4
}
dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string
@@ -1158,26 +1158,30 @@ pub fn commitDeclDebugInfo(
if (dbg_info_buffer.items.len == 0)
return;
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.off = @intCast(u32, dbg_info_buffer.items.len);
try self.addDbgInfoType(entry.key, dbg_info_buffer, target);
{
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len);
try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer, target);
}
}
try self.updateDeclDebugInfoAllocation(allocator, text_block, @intCast(u32, dbg_info_buffer.items.len));
// Now that we have the offset assigned we can finally perform type relocations.
it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
for (entry.value.relocs.items) |off| {
mem.writeIntLittle(
u32,
dbg_info_buffer.items[off..][0..4],
text_block.dbg_info_off + entry.value.off,
);
{
// Now that we have the offset assigned we can finally perform type relocations.
var it = dbg_info_type_relocs.valueIterator();
while (it.next()) |value| {
for (value.relocs.items) |off| {
mem.writeIntLittle(
u32,
dbg_info_buffer.items[off..][0..4],
text_block.dbg_info_off + value.off,
);
}
}
}
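The braces added above scope each iterator separately, since the two loops now use different iterator types: `iterator()` yields `{ key_ptr, value_ptr }` entries, while `valueIterator()` yields `*V` directly when the keys are not needed. A minimal sketch of both, with illustrative types:

const std = @import("std");

test "entry pointers and valueIterator" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 10);

    // Entries carry pointers: read via .*, and writes land in the map.
    var it = map.iterator();
    while (it.next()) |entry| {
        entry.value_ptr.* += entry.key_ptr.*;
    }

    // Values only: yields *V, no entry struct involved.
    var vit = map.valueIterator();
    while (vit.next()) |value| {
        try std.testing.expect(value.* == 11);
    }
}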
+3 -3
@@ -50,9 +50,9 @@ pub fn deinit(self: *Dylib) void {
}
self.load_commands.deinit(self.allocator);
for (self.symbols.items()) |entry| {
entry.value.deinit(self.allocator);
self.allocator.destroy(entry.value);
for (self.symbols.values()) |value| {
value.deinit(self.allocator);
self.allocator.destroy(value);
}
self.symbols.deinit(self.allocator);
+20 -23
@@ -168,9 +168,9 @@ pub fn deinit(self: *Zld) void {
self.strtab.deinit(self.allocator);
{
var it = self.strtab_dir.iterator();
while (it.next()) |entry| {
self.allocator.free(entry.key);
var it = self.strtab_dir.keyIterator();
while (it.next()) |key| {
self.allocator.free(key.*);
}
}
self.strtab_dir.deinit(self.allocator);
@@ -954,9 +954,8 @@ fn sortSections(self: *Zld) !void {
}
}
var it = self.mappings.iterator();
while (it.next()) |entry| {
const mapping = &entry.value;
var it = self.mappings.valueIterator();
while (it.next()) |mapping| {
if (self.text_segment_cmd_index.? == mapping.target_seg_id) {
const new_index = text_index_mapping.get(mapping.target_sect_id) orelse unreachable;
mapping.target_sect_id = new_index;
@@ -1400,16 +1399,16 @@ fn resolveSymbolsInObject(self: *Zld, object: *Object) !void {
if (sym.cast(Symbol.Regular)) |reg| {
if (reg.linkage == .translation_unit) continue; // Symbol local to TU.
if (self.unresolved.swapRemove(sym.name)) |entry| {
if (self.unresolved.fetchSwapRemove(sym.name)) |kv| {
// Create link to the global.
entry.value.alias = sym;
kv.value.alias = sym;
}
const entry = self.globals.getEntry(sym.name) orelse {
const sym_ptr = self.globals.getPtr(sym.name) orelse {
// Put new global symbol into the symbol table.
try self.globals.putNoClobber(self.allocator, sym.name, sym);
continue;
};
const g_sym = entry.value;
const g_sym = sym_ptr.*;
const g_reg = g_sym.cast(Symbol.Regular) orelse unreachable;
switch (g_reg.linkage) {
@@ -1432,7 +1431,7 @@ fn resolveSymbolsInObject(self: *Zld, object: *Object) !void {
}
g_sym.alias = sym;
entry.value = sym;
sym_ptr.* = sym;
} else if (sym.cast(Symbol.Unresolved)) |und| {
if (self.globals.get(sym.name)) |g_sym| {
sym.alias = g_sym;
@@ -1458,8 +1457,7 @@ fn resolveSymbols(self: *Zld) !void {
while (true) {
if (next_sym == self.unresolved.count()) break;
const entry = self.unresolved.items()[next_sym];
const sym = entry.value;
const sym = self.unresolved.values()[next_sym];
var reset: bool = false;
for (self.archives.items) |archive| {
@@ -1492,8 +1490,8 @@ fn resolveSymbols(self: *Zld) !void {
defer unresolved.deinit();
try unresolved.ensureCapacity(self.unresolved.count());
for (self.unresolved.items()) |entry| {
unresolved.appendAssumeCapacity(entry.value);
for (self.unresolved.values()) |value| {
unresolved.appendAssumeCapacity(value);
}
self.unresolved.clearAndFree(self.allocator);
@@ -2780,8 +2778,7 @@ fn writeSymbolTable(self: *Zld) !void {
var undefs = std.ArrayList(macho.nlist_64).init(self.allocator);
defer undefs.deinit();
for (self.imports.items()) |entry| {
const sym = entry.value;
for (self.imports.values()) |sym| {
const ordinal = ordinal: {
const dylib = sym.cast(Symbol.Proxy).?.dylib orelse break :ordinal 1; // TODO handle libSystem
break :ordinal dylib.ordinal.?;
@@ -3071,9 +3068,9 @@ pub fn parseName(name: *const [16]u8) []const u8 {
fn printSymbols(self: *Zld) void {
log.debug("globals", .{});
for (self.globals.items()) |entry| {
const sym = entry.value.cast(Symbol.Regular) orelse unreachable;
log.debug(" | {s} @ {*}", .{ sym.base.name, entry.value });
for (self.globals.values()) |value| {
const sym = value.cast(Symbol.Regular) orelse unreachable;
log.debug(" | {s} @ {*}", .{ sym.base.name, value });
log.debug(" => alias of {*}", .{sym.base.alias});
log.debug(" => linkage {s}", .{sym.linkage});
log.debug(" => defined in {s}", .{sym.file.name.?});
@@ -3091,9 +3088,9 @@ fn printSymbols(self: *Zld) void {
}
}
log.debug("proxies", .{});
for (self.imports.items()) |entry| {
const sym = entry.value.cast(Symbol.Proxy) orelse unreachable;
log.debug(" | {s} @ {*}", .{ sym.base.name, entry.value });
for (self.imports.values()) |value| {
const sym = value.cast(Symbol.Proxy) orelse unreachable;
log.debug(" | {s} @ {*}", .{ sym.base.name, value });
log.debug(" => alias of {*}", .{sym.base.alias});
log.debug(" => defined in libSystem.B.dylib", .{});
}
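Zld picks up three of the renames at once: `keyIterator()` for the string-table cleanup, `fetchSwapRemove` (which returns the removed `{K, V}` pair as an optional, by value), and `getPtr` in place of `getEntry(...).?.value`. A minimal sketch of the latter two, assuming a managed string-keyed array hash map with illustrative contents:

const std = @import("std");

test "fetchSwapRemove and getPtr" {
    var map = std.StringArrayHashMap(u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put("a", 1);
    try map.put("b", 2);

    // The old value-returning swapRemove is now fetchSwapRemove: ?KV.
    if (map.fetchSwapRemove("a")) |kv| {
        try std.testing.expect(kv.value == 1);
    }

    // getPtr returns ?*V; writing through it updates the stored value.
    if (map.getPtr("b")) |ptr| ptr.* = 20;
    try std.testing.expect(map.get("b").? == 20);
}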
+3 -5
@@ -114,7 +114,7 @@ pub fn updateDeclExports(
) !void {}
pub fn freeDecl(self: *SpirV, decl: *Module.Decl) void {
self.decl_table.removeAssertDiscard(decl);
assert(self.decl_table.swapRemove(decl));
}
pub fn flush(self: *SpirV, comp: *Compilation) !void {
@@ -141,8 +141,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
// declarations which don't generate a result?
// TODO: fn_link is used here, but that's probably not the right field. It will work anyway though.
{
for (self.decl_table.items()) |entry| {
const decl = entry.key;
for (self.decl_table.keys()) |decl| {
if (!decl.has_tv) continue;
decl.fn_link.spirv.id = spv.allocResultId();
@@ -154,8 +153,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
var decl_gen = codegen.DeclGen.init(&spv);
defer decl_gen.deinit();
for (self.decl_table.items()) |entry| {
const decl = entry.key;
for (self.decl_table.keys()) |decl| {
if (!decl.has_tv) continue;
if (try decl_gen.gen(decl)) |msg| {
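removeAssertDiscard is deleted across the tree; because the remove family now returns a bool saying whether the key was present, the assertion is written out with std.debug.assert, as the commit message suggests. A minimal sketch:

const std = @import("std");
const assert = std.debug.assert;

test "remove reports presence" {
    var map = std.AutoArrayHashMap(u32, void).init(std.testing.allocator);
    defer map.deinit();
    try map.put(7, {});

    // Old: map.removeAssertDiscard(7);
    assert(map.swapRemove(7)); // asserts the key was actually present
    try std.testing.expect(!map.swapRemove(7)); // absent keys report false
}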
+7 -7
@@ -422,8 +422,8 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
var count: u32 = 0;
for (module.decl_exports.entries.items) |entry| {
for (entry.value) |exprt| {
for (module.decl_exports.values()) |exports| {
for (exports) |exprt| {
// Export name length + name
try leb.writeULEB128(writer, @intCast(u32, exprt.options.name.len));
try writer.writeAll(exprt.options.name);
@@ -590,8 +590,8 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
self.base.releaseLock();
try man.addListOfFiles(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
_ = try man.addFile(entry.key.status.success.object_path, null);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
try man.addOptionalFile(compiler_rt_path);
@@ -638,7 +638,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
break :blk self.base.options.objects[0];
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.items()[0].key.status.success.object_path;
break :blk comp.c_object_table.keys()[0].status.success.object_path;
if (module_obj_path) |p|
break :blk p;
@@ -712,8 +712,8 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
// Positional arguments to the linker such as object files.
try argv.appendSlice(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
try argv.append(entry.key.status.success.object_path);
for (comp.c_object_table.keys()) |key| {
try argv.append(key.status.success.object_path);
}
if (module_obj_path) |p| {
try argv.append(p);
+28 -27
@@ -2,6 +2,7 @@ const std = @import("std");
const ir = @import("air.zig");
const trace = @import("tracy.zig").trace;
const log = std.log.scoped(.liveness);
const assert = std.debug.assert;
/// Perform Liveness Analysis over the `Body`. Each `Inst` will have its `deaths` field populated.
pub fn analyze(
@@ -86,9 +87,9 @@ fn analyzeInst(
// Reset the table back to its state from before the branch.
{
var it = then_table.iterator();
while (it.next()) |entry| {
table.removeAssertDiscard(entry.key);
var it = then_table.keyIterator();
while (it.next()) |key| {
assert(table.remove(key.*));
}
}
@@ -102,9 +103,9 @@ fn analyzeInst(
defer else_entry_deaths.deinit();
{
var it = else_table.iterator();
while (it.next()) |entry| {
const else_death = entry.key;
var it = else_table.keyIterator();
while (it.next()) |key| {
const else_death = key.*;
if (!then_table.contains(else_death)) {
try then_entry_deaths.append(else_death);
}
@@ -113,9 +114,9 @@ fn analyzeInst(
// This loop is the same, except it's for the then branch, and it additionally
// has to put its items back into the table to undo the reset.
{
var it = then_table.iterator();
while (it.next()) |entry| {
const then_death = entry.key;
var it = then_table.keyIterator();
while (it.next()) |key| {
const then_death = key.*;
if (!else_table.contains(then_death)) {
try else_entry_deaths.append(then_death);
}
@@ -125,13 +126,13 @@ fn analyzeInst(
// Now we have to correctly populate new_set.
if (new_set) |ns| {
try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count()));
var it = then_table.iterator();
while (it.next()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
var it = then_table.keyIterator();
while (it.next()) |key| {
_ = ns.putAssumeCapacity(key.*, {});
}
it = else_table.iterator();
while (it.next()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
it = else_table.keyIterator();
while (it.next()) |key| {
_ = ns.putAssumeCapacity(key.*, {});
}
}
inst.then_death_count = std.math.cast(@TypeOf(inst.then_death_count), then_entry_deaths.items.len) catch return error.OutOfMemory;
@@ -159,18 +160,18 @@ fn analyzeInst(
try analyzeWithTable(arena, table, &case_tables[i], case.body);
// Reset the table back to its state from before the case.
var it = case_tables[i].iterator();
while (it.next()) |entry| {
table.removeAssertDiscard(entry.key);
var it = case_tables[i].keyIterator();
while (it.next()) |key| {
assert(table.remove(key.*));
}
}
{ // else
try analyzeWithTable(arena, table, &case_tables[case_tables.len - 1], inst.else_body);
// Reset the table back to its state from before the case.
var it = case_tables[case_tables.len - 1].iterator();
while (it.next()) |entry| {
table.removeAssertDiscard(entry.key);
var it = case_tables[case_tables.len - 1].keyIterator();
while (it.next()) |key| {
assert(table.remove(key.*));
}
}
@@ -184,9 +185,9 @@ fn analyzeInst(
var total_deaths: u32 = 0;
for (case_tables) |*ct, i| {
total_deaths += ct.count();
var it = ct.iterator();
while (it.next()) |entry| {
const case_death = entry.key;
var it = ct.keyIterator();
while (it.next()) |key| {
const case_death = key.*;
for (case_tables) |*ct_inner, j| {
if (i == j) continue;
if (!ct_inner.contains(case_death)) {
@@ -203,9 +204,9 @@ fn analyzeInst(
if (new_set) |ns| {
try ns.ensureCapacity(@intCast(u32, ns.count() + total_deaths));
for (case_tables) |*ct| {
var it = ct.iterator();
while (it.next()) |entry| {
_ = ns.putAssumeCapacity(entry.key, {});
var it = ct.keyIterator();
while (it.next()) |key| {
_ = ns.putAssumeCapacity(key.*, {});
}
}
}
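All of the liveness tables are void-valued maps used as sets, so every loop above needs only the keys; `keyIterator()` replaces `iterator()` plus `entry.key`. A minimal sketch of the reset-the-table idiom, with std.AutoHashMap(u32, void) standing in for the instruction tables:

const std = @import("std");
const assert = std.debug.assert;

test "undo a branch table with keyIterator" {
    const gpa = std.testing.allocator;
    var table = std.AutoHashMap(u32, void).init(gpa);
    defer table.deinit();
    var then_table = std.AutoHashMap(u32, void).init(gpa);
    defer then_table.deinit();

    try table.put(1, {});
    try then_table.put(1, {});

    // Every key recorded in the branch must still be in the main table.
    var it = then_table.keyIterator();
    while (it.next()) |key| {
        assert(table.remove(key.*));
    }
    try std.testing.expect(table.count() == 0);
}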
+6 -6
@@ -180,7 +180,7 @@ pub fn mainArgs(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v
"in order to determine where libc is installed. However the system C " ++
"compiler is `zig cc`, so no libc installation was found.", .{});
}
try env_map.set(inf_loop_env_key, "1");
try env_map.put(inf_loop_env_key, "1");
// Some programs such as CMake will strip the `cc` and subsequent args from the
// CC environment variable. We detect and support this scenario here because of
@@ -2310,9 +2310,9 @@ fn updateModule(gpa: *Allocator, comp: *Compilation, hook: AfterUpdateHook) !voi
fn freePkgTree(gpa: *Allocator, pkg: *Package, free_parent: bool) void {
{
var it = pkg.table.iterator();
while (it.next()) |kv| {
freePkgTree(gpa, kv.value, true);
var it = pkg.table.valueIterator();
while (it.next()) |value| {
freePkgTree(gpa, value.*, true);
}
}
if (free_parent) {
@@ -3895,7 +3895,7 @@ pub fn cmdChangelist(
var it = inst_map.iterator();
while (it.next()) |entry| {
try stdout.print(" %{d} => %{d}\n", .{
entry.key, entry.value,
entry.key_ptr.*, entry.value_ptr.*,
});
}
}
@@ -3904,7 +3904,7 @@ pub fn cmdChangelist(
var it = extra_map.iterator();
while (it.next()) |entry| {
try stdout.print(" {d} => {d}\n", .{
entry.key, entry.value,
entry.key_ptr.*, entry.value_ptr.*,
});
}
}
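The env_map change is the BufMap rename: `set` becomes `put` to match the other map types. A minimal sketch:

const std = @import("std");

test "BufMap set is now put" {
    var env_map = std.BufMap.init(std.testing.allocator);
    defer env_map.deinit();

    // Old: try env_map.set("ZIG_DEBUG_COLOR", "1");
    try env_map.put("ZIG_DEBUG_COLOR", "1");
    try std.testing.expect(std.mem.eql(u8, env_map.get("ZIG_DEBUG_COLOR").?, "1"));
}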
+4 -3
@@ -135,9 +135,10 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const s = path.sep_str;
for (source_table.items()) |entry| {
const src_file = entry.key;
const ext = entry.value;
var it = source_table.iterator();
while (it.next()) |entry| {
const src_file = entry.key_ptr.*;
const ext = entry.value_ptr.*;
const dirname = path.dirname(src_file).?;
const basename = path.basename(src_file);
+5 -5
@@ -453,7 +453,7 @@ fn declVisitorNamesOnly(c: *Context, decl: *const clang.Decl) Error!void {
// Don't put this one in `decl_table` so it's processed later.
return;
}
result.entry.value = name;
result.value_ptr.* = name;
// Put this typedef in the decl_table to avoid redefinitions.
try c.decl_table.putNoClobber(c.gpa, @ptrToInt(typedef_decl.getCanonicalDecl()), name);
}
@@ -5765,14 +5765,14 @@ fn getFnProto(c: *Context, ref: Node) ?*ast.Payload.Func {
fn addMacros(c: *Context) !void {
var it = c.global_scope.macro_table.iterator();
while (it.next()) |kv| {
if (getFnProto(c, kv.value)) |proto_node| {
while (it.next()) |entry| {
if (getFnProto(c, entry.value_ptr.*)) |proto_node| {
// If a macro aliases a global variable which is a function pointer, we conclude that
// the macro is intended to represent a function that assumes the function pointer
// variable is non-null and calls it.
try addTopLevelDecl(c, kv.key, try transCreateNodeMacroFn(c, kv.key, kv.value, proto_node));
try addTopLevelDecl(c, entry.key_ptr.*, try transCreateNodeMacroFn(c, entry.key_ptr.*, entry.value_ptr.*, proto_node));
} else {
try addTopLevelDecl(c, kv.key, kv.value);
try addTopLevelDecl(c, entry.key_ptr.*, entry.value_ptr.*);
}
}
}
+29 -24
@@ -596,6 +596,15 @@ pub const Type = extern union {
return hasher.final();
}
pub const HashContext = struct {
pub fn hash(self: @This(), t: Type) u64 {
return t.hash();
}
pub fn eql(self: @This(), a: Type, b: Type) bool {
return a.eql(b);
}
};
pub fn copy(self: Type, allocator: *Allocator) error{OutOfMemory}!Type {
if (self.tag_if_small_enough < Tag.no_payload_count) {
return Type{ .tag_if_small_enough = self.tag_if_small_enough };
@@ -1147,8 +1156,8 @@ pub const Type = extern union {
.@"struct" => {
// TODO introduce lazy value mechanism
const struct_obj = self.castTag(.@"struct").?.data;
for (struct_obj.fields.entries.items) |entry| {
if (entry.value.ty.hasCodeGenBits())
for (struct_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
@@ -1169,8 +1178,8 @@ pub const Type = extern union {
},
.@"union" => {
const union_obj = self.castTag(.@"union").?.data;
for (union_obj.fields.entries.items) |entry| {
if (entry.value.ty.hasCodeGenBits())
for (union_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
@@ -1181,8 +1190,8 @@ pub const Type = extern union {
if (union_obj.tag_ty.hasCodeGenBits()) {
return true;
}
for (union_obj.fields.entries.items) |entry| {
if (entry.value.ty.hasCodeGenBits())
for (union_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
@@ -1380,10 +1389,9 @@ pub const Type = extern union {
// like we have in stage1.
const struct_obj = self.castTag(.@"struct").?.data;
var biggest: u32 = 0;
for (struct_obj.fields.entries.items) |entry| {
const field_ty = entry.value.ty;
if (!field_ty.hasCodeGenBits()) continue;
const field_align = field_ty.abiAlignment(target);
for (struct_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
const field_align = field.ty.abiAlignment(target);
if (field_align > biggest) {
return field_align;
}
@@ -1399,10 +1407,9 @@ pub const Type = extern union {
.union_tagged => {
const union_obj = self.castTag(.union_tagged).?.data;
var biggest: u32 = union_obj.tag_ty.abiAlignment(target);
for (union_obj.fields.entries.items) |entry| {
const field_ty = entry.value.ty;
if (!field_ty.hasCodeGenBits()) continue;
const field_align = field_ty.abiAlignment(target);
for (union_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
const field_align = field.ty.abiAlignment(target);
if (field_align > biggest) {
biggest = field_align;
}
@@ -1413,10 +1420,9 @@ pub const Type = extern union {
.@"union" => {
const union_obj = self.castTag(.@"union").?.data;
var biggest: u32 = 0;
for (union_obj.fields.entries.items) |entry| {
const field_ty = entry.value.ty;
if (!field_ty.hasCodeGenBits()) continue;
const field_align = field_ty.abiAlignment(target);
for (union_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
const field_align = field.ty.abiAlignment(target);
if (field_align > biggest) {
biggest = field_align;
}
@@ -2415,9 +2421,8 @@ pub const Type = extern union {
.@"struct" => {
const s = ty.castTag(.@"struct").?.data;
assert(s.haveFieldTypes());
for (s.fields.entries.items) |entry| {
const field_ty = entry.value.ty;
if (field_ty.onePossibleValue() == null) {
for (s.fields.values()) |field| {
if (field.ty.onePossibleValue() == null) {
return null;
}
}
@@ -2426,7 +2431,7 @@ pub const Type = extern union {
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
if (enum_full.fields.count() == 1) {
return enum_full.values.entries.items[0].key;
return enum_full.values.keys()[0];
} else {
return null;
}
@@ -2583,11 +2588,11 @@ pub const Type = extern union {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
return enum_full.fields.entries.items[field_index].key;
return enum_full.fields.keys()[field_index];
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.fields.entries.items[field_index].key;
return enum_simple.fields.keys()[field_index];
},
.atomic_ordering,
.atomic_rmw_op,
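This is the first of the new context types: hash/eql now travel together in a context struct instead of as standalone comptime function parameters, so Type can plug `Type.HashContext` into a u64-hashing map. A hedged sketch of the same wiring with a made-up key type (Point and PointContext are illustrative, not from this commit):

const std = @import("std");

const Point = struct { x: i32, y: i32 };

const PointContext = struct {
    pub fn hash(self: @This(), p: Point) u64 {
        var hasher = std.hash.Wyhash.init(0);
        std.hash.autoHash(&hasher, p);
        return hasher.final();
    }
    pub fn eql(self: @This(), a: Point, b: Point) bool {
        return a.x == b.x and a.y == b.y;
    }
};

test "custom hash context" {
    var map = std.HashMap(Point, u32, PointContext, std.hash_map.default_max_load_percentage)
        .init(std.testing.allocator);
    defer map.deinit();
    try map.put(.{ .x = 1, .y = 2 }, 42);
    try std.testing.expect(map.get(.{ .x = 1, .y = 2 }).? == 42);
}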
+17
@@ -1256,6 +1256,23 @@ pub const Value = extern union {
return hasher.final();
}
pub const ArrayHashContext = struct {
pub fn hash(self: @This(), v: Value) u32 {
return v.hash_u32();
}
pub fn eql(self: @This(), a: Value, b: Value) bool {
return a.eql(b);
}
};
pub const HashContext = struct {
pub fn hash(self: @This(), v: Value) u64 {
return v.hash();
}
pub fn eql(self: @This(), a: Value, b: Value) bool {
return a.eql(b);
}
};
/// Asserts the value is a pointer and dereferences it.
/// Returns error.AnalysisFail if the pointer points to a Decl that failed semantic analysis.
pub fn pointerDeref(self: Value, allocator: *Allocator) error{ AnalysisFail, OutOfMemory }!Value {
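Value needs both flavors because the two map families hash differently: array hash map contexts return u32, regular hash map contexts return u64. A minimal sketch of the u32 side with an illustrative context over u64 keys (IdContext is made up; store_hash is false since eql here is cheap):

const std = @import("std");

const IdContext = struct {
    pub fn hash(self: @This(), id: u64) u32 {
        return @truncate(u32, std.hash.Wyhash.hash(0, std.mem.asBytes(&id)));
    }
    pub fn eql(self: @This(), a: u64, b: u64) bool {
        return a == b;
    }
};

test "array hash map contexts hash to u32" {
    var map = std.ArrayHashMap(u64, []const u8, IdContext, false).init(std.testing.allocator);
    defer map.deinit();
    try map.put(123, "hello");
    try std.testing.expect(map.get(123) != null);
}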
+2 -2
@@ -107,11 +107,11 @@ test "union with specified enum tag" {
comptime try doTest();
}
fn doTest() !void {
fn doTest() error{TestUnexpectedResult}!void {
try expect((try bar(Payload{ .A = 1234 })) == -10);
}
fn bar(value: Payload) !i32 {
fn bar(value: Payload) error{TestUnexpectedResult}!i32 {
try expect(@as(Letter, value) == Letter.A);
return switch (value) {
Payload.A => |x| return x - 1244,
+12 -12
@@ -377,14 +377,14 @@ pub fn main() !void {
const gop = try hash_to_contents.getOrPut(hash);
if (gop.found_existing) {
max_bytes_saved += raw_bytes.len;
gop.entry.value.hit_count += 1;
gop.value_ptr.hit_count += 1;
std.debug.warn("duplicate: {s} {s} ({:2})\n", .{
libc_target.name,
rel_path,
std.fmt.fmtIntSizeDec(raw_bytes.len),
});
} else {
gop.entry.value = Contents{
gop.value_ptr.* = Contents{
.bytes = trimmed,
.hit_count = 1,
.hash = hash,
@@ -392,10 +392,10 @@ pub fn main() !void {
};
}
const path_gop = try path_table.getOrPut(rel_path);
const target_to_hash = if (path_gop.found_existing) path_gop.entry.value else blk: {
const target_to_hash = if (path_gop.found_existing) path_gop.value_ptr.* else blk: {
const ptr = try allocator.create(TargetToHash);
ptr.* = TargetToHash.init(allocator);
path_gop.entry.value = ptr;
path_gop.value_ptr.* = ptr;
break :blk ptr;
};
try target_to_hash.putNoClobber(dest_target, hash);
@@ -423,9 +423,9 @@ pub fn main() !void {
while (path_it.next()) |path_kv| {
var contents_list = std.ArrayList(*Contents).init(allocator);
{
var hash_it = path_kv.value.iterator();
var hash_it = path_kv.value.*.iterator();
while (hash_it.next()) |hash_kv| {
const contents = &hash_to_contents.getEntry(hash_kv.value).?.value;
const contents = hash_to_contents.get(hash_kv.value.*).?;
try contents_list.append(contents);
}
}
@@ -433,7 +433,7 @@ pub fn main() !void {
const best_contents = contents_list.popOrNull().?;
if (best_contents.hit_count > 1) {
// worth it to make it generic
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, generic_name, path_kv.key });
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, generic_name, path_kv.key.* });
try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
try std.fs.cwd().writeFile(full_path, best_contents.bytes);
best_contents.is_generic = true;
@@ -443,17 +443,17 @@ pub fn main() !void {
missed_opportunity_bytes += this_missed_bytes;
std.debug.warn("Missed opportunity ({:2}): {s}\n", .{
std.fmt.fmtIntSizeDec(this_missed_bytes),
path_kv.key,
path_kv.key.*,
});
} else break;
}
}
var hash_it = path_kv.value.iterator();
var hash_it = path_kv.value.*.iterator();
while (hash_it.next()) |hash_kv| {
const contents = &hash_to_contents.getEntry(hash_kv.value).?.value;
const contents = hash_to_contents.get(hash_kv.value.*).?;
if (contents.is_generic) continue;
const dest_target = hash_kv.key;
const dest_target = hash_kv.key.*;
const arch_name = switch (dest_target.arch) {
.specific => |a| @tagName(a),
else => @tagName(dest_target.arch),
@@ -463,7 +463,7 @@ pub fn main() !void {
@tagName(dest_target.os),
@tagName(dest_target.abi),
});
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, out_subpath, path_kv.key });
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, out_subpath, path_kv.key.* });
try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
try std.fs.cwd().writeFile(full_path, contents.bytes);
}
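Where the old code reached through getEntry(...).?.value, the replacement depends on intent: `get` copies the value out, while `getPtr` returns a pointer into the map (valid until the map is next modified) for in-place mutation. A minimal sketch of the distinction, with illustrative types:

const std = @import("std");

test "get copies, getPtr points" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    try map.put(1, 100);

    // Old: const v = map.getEntry(1).?.value;
    const copy = map.get(1).?; // a copy; later writes don't affect it
    const ptr = map.getPtr(1).?; // points at the stored value
    ptr.* += 1;
    try std.testing.expect(copy == 100);
    try std.testing.expect(map.get(1).? == 101);
}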
+3 -3
@@ -413,12 +413,12 @@ pub fn main() anyerror!void {
var it = root_map.iterator();
it_map: while (it.next()) |kv| {
if (kv.key.len == 0) continue;
if (kv.key[0] == '!') continue;
if (kv.value != .Object) continue;
if (kv.key.*[0] == '!') continue;
if (kv.value.* != .Object) continue;
if (!kv.value.Object.contains("NumArgs")) continue;
if (!kv.value.Object.contains("Name")) continue;
for (blacklisted_options) |blacklisted_key| {
if (std.mem.eql(u8, blacklisted_key, kv.key)) continue :it_map;
if (std.mem.eql(u8, blacklisted_key, kv.key.*)) continue :it_map;
}
if (kv.value.Object.get("Name").?.String.len == 0) continue;
try all_objects.append(&kv.value.Object);
+17 -17
@@ -903,8 +903,8 @@ fn processOneTarget(job: Job) anyerror!void {
var it = root_map.iterator();
root_it: while (it.next()) |kv| {
if (kv.key.len == 0) continue;
if (kv.key[0] == '!') continue;
if (kv.value != .Object) continue;
if (kv.key.*[0] == '!') continue;
if (kv.value.* != .Object) continue;
if (hasSuperclass(&kv.value.Object, "SubtargetFeature")) {
const llvm_name = kv.value.Object.get("Name").?.String;
if (llvm_name.len == 0) continue;
@@ -917,7 +917,7 @@ fn processOneTarget(job: Job) anyerror!void {
const implies = kv.value.Object.get("Implies").?.Array;
for (implies.items) |imply| {
const other_key = imply.Object.get("def").?.String;
const other_obj = &root_map.getEntry(other_key).?.value.Object;
const other_obj = &root_map.getPtr(other_key).?.Object;
const other_llvm_name = other_obj.get("Name").?.String;
const other_zig_name = (try llvmNameToZigNameOmit(
arena,
@@ -969,7 +969,7 @@ fn processOneTarget(job: Job) anyerror!void {
const features = kv.value.Object.get("Features").?.Array;
for (features.items) |feature| {
const feature_key = feature.Object.get("def").?.String;
const feature_obj = &root_map.getEntry(feature_key).?.value.Object;
const feature_obj = &root_map.getPtr(feature_key).?.Object;
const feature_llvm_name = feature_obj.get("Name").?.String;
if (feature_llvm_name.len == 0) continue;
const feature_zig_name = (try llvmNameToZigNameOmit(
@@ -982,7 +982,7 @@ fn processOneTarget(job: Job) anyerror!void {
const tune_features = kv.value.Object.get("TuneFeatures").?.Array;
for (tune_features.items) |feature| {
const feature_key = feature.Object.get("def").?.String;
const feature_obj = &root_map.getEntry(feature_key).?.value.Object;
const feature_obj = &root_map.getPtr(feature_key).?.Object;
const feature_llvm_name = feature_obj.get("Name").?.String;
if (feature_llvm_name.len == 0) continue;
const feature_zig_name = (try llvmNameToZigNameOmit(
@@ -1109,9 +1109,9 @@ fn processOneTarget(job: Job) anyerror!void {
try pruneFeatures(arena, features_table, &deps_set);
var dependencies = std.ArrayList([]const u8).init(arena);
{
var it = deps_set.iterator();
while (it.next()) |entry| {
try dependencies.append(entry.key);
var it = deps_set.keyIterator();
while (it.next()) |key| {
try dependencies.append(key.*);
}
}
std.sort.sort([]const u8, dependencies.items, {}, asciiLessThan);
@@ -1154,9 +1154,9 @@ fn processOneTarget(job: Job) anyerror!void {
try pruneFeatures(arena, features_table, &deps_set);
var cpu_features = std.ArrayList([]const u8).init(arena);
{
var it = deps_set.iterator();
while (it.next()) |entry| {
try cpu_features.append(entry.key);
var it = deps_set.keyIterator();
while (it.next()) |key| {
try cpu_features.append(key.*);
}
}
std.sort.sort([]const u8, cpu_features.items, {}, asciiLessThan);
@@ -1278,16 +1278,16 @@ fn pruneFeatures(
// Then, iterate over the deletion set and delete all that stuff from `deps_set`.
var deletion_set = std.StringHashMap(void).init(arena);
{
var it = deps_set.iterator();
while (it.next()) |entry| {
const feature = features_table.get(entry.key).?;
var it = deps_set.keyIterator();
while (it.next()) |key| {
const feature = features_table.get(key.*).?;
try walkFeatures(features_table, &deletion_set, feature);
}
}
{
var it = deletion_set.iterator();
while (it.next()) |entry| {
_ = deps_set.remove(entry.key);
var it = deletion_set.keyIterator();
while (it.next()) |key| {
_ = deps_set.remove(key.*);
}
}
}
+22 -22
@@ -148,12 +148,12 @@ pub fn main() !void {
for (abi_lists) |*abi_list| {
const target_funcs_gop = try target_functions.getOrPut(@ptrToInt(abi_list));
if (!target_funcs_gop.found_existing) {
target_funcs_gop.entry.value = FunctionSet{
target_funcs_gop.value_ptr.* = FunctionSet{
.list = std.ArrayList(VersionedFn).init(allocator),
.fn_vers_list = FnVersionList.init(allocator),
};
}
const fn_set = &target_funcs_gop.entry.value.list;
const fn_set = &target_funcs_gop.value_ptr.list;
for (lib_names) |lib_name, lib_name_index| {
const lib_prefix = if (std.mem.eql(u8, lib_name, "ld")) "" else "lib";
@@ -203,11 +203,11 @@ pub fn main() !void {
try global_ver_set.put(ver, undefined);
const gop = try global_fn_set.getOrPut(name);
if (gop.found_existing) {
if (!std.mem.eql(u8, gop.entry.value.lib, "c")) {
gop.entry.value.lib = lib_name;
if (!std.mem.eql(u8, gop.value_ptr.lib, "c")) {
gop.value_ptr.lib = lib_name;
}
} else {
gop.entry.value = Function{
gop.value_ptr.* = Function{
.name = name,
.lib = lib_name,
.index = undefined,
@@ -223,15 +223,15 @@ pub fn main() !void {
const global_fn_list = blk: {
var list = std.ArrayList([]const u8).init(allocator);
var it = global_fn_set.iterator();
while (it.next()) |entry| try list.append(entry.key);
var it = global_fn_set.keyIterator();
while (it.next()) |key| try list.append(key.*);
std.sort.sort([]const u8, list.items, {}, strCmpLessThan);
break :blk list.items;
};
const global_ver_list = blk: {
var list = std.ArrayList([]const u8).init(allocator);
var it = global_ver_set.iterator();
while (it.next()) |entry| try list.append(entry.key);
var it = global_ver_set.keyIterator();
while (it.next()) |key| try list.append(key.*);
std.sort.sort([]const u8, list.items, {}, versionLessThan);
break :blk list.items;
};
@@ -254,9 +254,9 @@ pub fn main() !void {
var buffered = std.io.bufferedWriter(fns_txt_file.writer());
const fns_txt = buffered.writer();
for (global_fn_list) |name, i| {
const entry = global_fn_set.getEntry(name).?;
entry.value.index = i;
try fns_txt.print("{s} {s}\n", .{ name, entry.value.lib });
const value = global_fn_set.getPtr(name).?;
value.index = i;
try fns_txt.print("{s} {s}\n", .{ name, value.lib });
}
try buffered.flush();
}
@@ -264,16 +264,16 @@ pub fn main() !void {
// Now the mapping of version and function to integer index is complete.
// Here we create a mapping of function name to list of versions.
for (abi_lists) |*abi_list, abi_index| {
const entry = target_functions.getEntry(@ptrToInt(abi_list)).?;
const fn_vers_list = &entry.value.fn_vers_list;
for (entry.value.list.items) |*ver_fn| {
const value = target_functions.getPtr(@ptrToInt(abi_list)).?;
const fn_vers_list = &value.fn_vers_list;
for (value.list.items) |*ver_fn| {
const gop = try fn_vers_list.getOrPut(ver_fn.name);
if (!gop.found_existing) {
gop.entry.value = std.ArrayList(usize).init(allocator);
gop.value_ptr.* = std.ArrayList(usize).init(allocator);
}
const ver_index = global_ver_set.getEntry(ver_fn.ver).?.value;
if (std.mem.indexOfScalar(usize, gop.entry.value.items, ver_index) == null) {
try gop.entry.value.append(ver_index);
const ver_index = global_ver_set.get(ver_fn.ver).?;
if (std.mem.indexOfScalar(usize, gop.value_ptr.items, ver_index) == null) {
try gop.value_ptr.append(ver_index);
}
}
}
@@ -287,7 +287,7 @@ pub fn main() !void {
// first iterate over the abi lists
for (abi_lists) |*abi_list, abi_index| {
const fn_vers_list = &target_functions.getEntry(@ptrToInt(abi_list)).?.value.fn_vers_list;
const fn_vers_list = &target_functions.getPtr(@ptrToInt(abi_list)).?.fn_vers_list;
for (abi_list.targets) |target, it_i| {
if (it_i != 0) try abilist_txt.writeByte(' ');
try abilist_txt.print("{s}-linux-{s}", .{ @tagName(target.arch), @tagName(target.abi) });
@@ -295,11 +295,11 @@ pub fn main() !void {
try abilist_txt.writeByte('\n');
// next, each line implicitly corresponds to a function
for (global_fn_list) |name| {
const entry = fn_vers_list.getEntry(name) orelse {
const value = fn_vers_list.getPtr(name) orelse {
try abilist_txt.writeByte('\n');
continue;
};
for (entry.value.items) |ver_index, it_i| {
for (value.items) |ver_index, it_i| {
if (it_i != 0) try abilist_txt.writeByte(' ');
try abilist_txt.print("{d}", .{ver_index});
}