From 85de022c5671d777f62ddff254a814dab05242fc Mon Sep 17 00:00:00 2001
From: Lee Cannon <leecannon@leecannon.xyz>

diff --git a/doc/docgen.zig b/doc/docgen.zig
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -1188,7 +1188,7 @@ fn printShell(out: anytype, shell_content: []const u8) !void {
}
fn genHtml(
- allocator: *Allocator,
+ allocator: Allocator,
tokenizer: *Tokenizer,
toc: *Toc,
out: anytype,
@@ -1687,7 +1687,7 @@ fn genHtml(
}
}
-fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
+fn exec(allocator: Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
const result = try ChildProcess.exec(.{
.allocator = allocator,
.argv = args,
@@ -1711,7 +1711,7 @@ fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !
return result;
}
-fn getBuiltinCode(allocator: *Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
+fn getBuiltinCode(allocator: Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "build-obj", "--show-builtin" });
return result.stdout;
}
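This patch is the std-wide switch from `*Allocator` to a by-value `Allocator` interface: the interface becomes a small pointer-plus-vtable value that is copied around, so it no longer needs to alias a field embedded in the concrete allocator. A minimal sketch of what changes for a typical call site, assuming the post-patch `std.mem.Allocator` value type (the function and path here are illustrative, not from the patch):

```zig
const std = @import("std");

// Hedged sketch, not part of the patch: the only change most signatures see
// is the parameter type. The interface value is copied by the callee instead
// of pointing back into the concrete allocator's struct.
fn readConfig(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
    // before this patch: fn readConfig(allocator: *std.mem.Allocator, ...)
    return std.fs.cwd().readFileAlloc(allocator, path, 64 * 1024);
}
```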
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 1eafe28be2be..61f19c20d083 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -460,7 +460,7 @@ const WindowsThreadImpl = struct {
errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0);
const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes];
- const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator.create(Instance) catch unreachable;
+ const instance = std.heap.FixedBufferAllocator.init(instance_bytes).getAllocator().create(Instance) catch unreachable;
instance.* = .{
.fn_args = args,
.thread = .{
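The `Thread.zig` hunk shows the new accessor pattern: implementations hand out an `Allocator` value instead of exposing an embedded `allocator` field. A sketch with `FixedBufferAllocator`, where the `getAllocator` name is taken from this branch (previously the interface was reached as `&fba.allocator`):

```zig
const std = @import("std");

test "fetching the interface from a FixedBufferAllocator" {
    var buf: [256]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    const a = fba.getAllocator(); // previously `&fba.allocator`
    const bytes = try a.alloc(u8, 16);
    defer a.free(bytes);
}
```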
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index e787abf1efb7..7ebafc0a1b4b 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -79,7 +79,7 @@ pub fn ArrayHashMap(
comptime std.hash_map.verifyContext(Context, K, K, u32);
return struct {
unmanaged: Unmanaged,
- allocator: *Allocator,
+ allocator: Allocator,
ctx: Context,
/// The ArrayHashMapUnmanaged type using the same settings as this managed map.
@@ -118,12 +118,12 @@ pub fn ArrayHashMap(
const Self = @This();
/// Create an ArrayHashMap instance which will use a specified allocator.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead.");
return initContext(allocator, undefined);
}
- pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+ pub fn initContext(allocator: Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.allocator = allocator,
@@ -383,7 +383,7 @@ pub fn ArrayHashMap(
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context as this instance, but the specified
/// allocator.
- pub fn cloneWithAllocator(self: Self, allocator: *Allocator) !Self {
+ pub fn cloneWithAllocator(self: Self, allocator: Allocator) !Self {
var other = try self.unmanaged.cloneContext(allocator, self.ctx);
return other.promoteContext(allocator, self.ctx);
}
@@ -396,7 +396,7 @@ pub fn ArrayHashMap(
}
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the specified allocator and context.
- pub fn cloneWithAllocatorAndContext(self: Self, allocator: *Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
+ pub fn cloneWithAllocatorAndContext(self: Self, allocator: Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
var other = try self.unmanaged.cloneContext(allocator, ctx);
return other.promoteContext(allocator, ctx);
}
@@ -533,12 +533,12 @@ pub fn ArrayHashMapUnmanaged(
/// Convert from an unmanaged map to a managed map. After calling this,
/// the promoted map should no longer be used.
- pub fn promote(self: Self, allocator: *Allocator) Managed {
+ pub fn promote(self: Self, allocator: Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
return self.promoteContext(allocator, undefined);
}
- pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+ pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = allocator,
@@ -549,7 +549,7 @@ pub fn ArrayHashMapUnmanaged(
/// Frees the backing allocation and leaves the map in an undefined state.
/// Note that this does not free keys or values. You must take care of that
/// before calling this function, if it is needed.
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
self.entries.deinit(allocator);
if (self.index_header) |header| {
header.free(allocator);
@@ -570,7 +570,7 @@ pub fn ArrayHashMapUnmanaged(
}
/// Clears the map and releases the backing allocation
- pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ pub fn clearAndFree(self: *Self, allocator: Allocator) void {
self.entries.shrinkAndFree(allocator, 0);
if (self.index_header) |header| {
header.free(allocator);
@@ -633,24 +633,24 @@ pub fn ArrayHashMapUnmanaged(
/// Otherwise, puts a new item with undefined value, and
/// the `Entry` pointer points to it. Caller should then initialize
/// the value (but not the key).
- pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+ pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
return self.getOrPutContext(allocator, key, undefined);
}
- pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
- pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+ pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
- pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
self.ensureTotalCapacityContext(allocator, self.entries.len + 1, ctx) catch |err| {
// "If key exists this function cannot fail."
const index = self.getIndexAdapted(key, key_ctx) orelse return err;
@@ -731,12 +731,12 @@ pub fn ArrayHashMapUnmanaged(
}
}
- pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult {
+ pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
return self.getOrPutValueContext(allocator, key, value, undefined);
}
- pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!res.found_existing) {
res.key_ptr.* = key;
@@ -749,12 +749,12 @@ pub fn ArrayHashMapUnmanaged(
/// Increases capacity, guaranteeing that insertions up until the
/// `expected_count` will not cause an allocation, and therefore cannot fail.
- pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
return self.ensureTotalCapacityContext(allocator, new_capacity, undefined);
}
- pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
+ pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_capacity: usize, ctx: Context) !void {
if (new_capacity <= linear_scan_max) {
try self.entries.ensureTotalCapacity(allocator, new_capacity);
return;
@@ -781,7 +781,7 @@ pub fn ArrayHashMapUnmanaged(
/// therefore cannot fail.
pub fn ensureUnusedCapacity(
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
additional_capacity: usize,
) !void {
if (@sizeOf(ByIndexContext) != 0)
@@ -790,7 +790,7 @@ pub fn ArrayHashMapUnmanaged(
}
pub fn ensureUnusedCapacityContext(
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
additional_capacity: usize,
ctx: Context,
) !void {
@@ -808,24 +808,24 @@ pub fn ArrayHashMapUnmanaged(
/// Clobbers any existing data. To detect if a put would clobber
/// existing data, see `getOrPut`.
- pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
return self.putContext(allocator, key, value, undefined);
}
- pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
result.value_ptr.* = value;
}
/// Inserts a key-value pair into the hash map, asserting that no previous
/// entry with the same key is already present
- pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
return self.putNoClobberContext(allocator, key, value, undefined);
}
- pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
assert(!result.found_existing);
result.value_ptr.* = value;
@@ -859,12 +859,12 @@ pub fn ArrayHashMapUnmanaged(
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
- pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+ pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
return self.fetchPutContext(allocator, key, value, undefined);
}
- pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+ pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
const gop = try self.getOrPutContext(allocator, key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
@@ -1132,12 +1132,12 @@ pub fn ArrayHashMapUnmanaged(
/// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context and allocator as this instance.
- pub fn clone(self: Self, allocator: *Allocator) !Self {
+ pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
return self.cloneContext(allocator, undefined);
}
- pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self {
+ pub fn cloneContext(self: Self, allocator: Allocator, ctx: Context) !Self {
var other: Self = .{};
other.entries = try self.entries.clone(allocator);
errdefer other.entries.deinit(allocator);
@@ -1152,12 +1152,12 @@ pub fn ArrayHashMapUnmanaged(
/// Rebuilds the key indexes. If the underlying entries has been modified directly, users
/// can call `reIndex` to update the indexes to account for these new entries.
- pub fn reIndex(self: *Self, allocator: *Allocator) !void {
+ pub fn reIndex(self: *Self, allocator: Allocator) !void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead.");
return self.reIndexContext(allocator, undefined);
}
- pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void {
+ pub fn reIndexContext(self: *Self, allocator: Allocator, ctx: Context) !void {
if (self.entries.capacity <= linear_scan_max) return;
// We're going to rebuild the index header and replace the existing one (if any). The
// indexes should be sized such that they will be at most 60% full.
@@ -1189,12 +1189,12 @@ pub fn ArrayHashMapUnmanaged(
/// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
/// index entries. Reduces allocated capacity.
- pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+ pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead.");
return self.shrinkAndFreeContext(allocator, new_len, undefined);
}
- pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void {
+ pub fn shrinkAndFreeContext(self: *Self, allocator: Allocator, new_len: usize, ctx: Context) void {
// Remove index entries from the new length onwards.
// Explicitly choose to ONLY remove index entries and not the underlying array list
// entries as we're going to remove them in the subsequent shrink call.
@@ -1844,7 +1844,7 @@ const IndexHeader = struct {
/// Allocates an index header, and fills the entryIndexes array with empty.
/// The distance array contents are undefined.
- fn alloc(allocator: *Allocator, new_bit_index: u8) !*IndexHeader {
+ fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader {
const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
const index_size = hash_map.capacityIndexSize(new_bit_index);
const nbytes = @sizeOf(IndexHeader) + index_size * len;
@@ -1858,7 +1858,7 @@ const IndexHeader = struct {
}
/// Releases the memory for a header and its associated arrays.
- fn free(header: *IndexHeader, allocator: *Allocator) void {
+ fn free(header: *IndexHeader, allocator: Allocator) void {
const index_size = hash_map.capacityIndexSize(header.bit_index);
const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];
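Every unmanaged-map method above now takes the allocator by value. A sketch of the per-call pattern, assuming `std.testing.allocator` already exposes the new value interface at this point in the branch:

```zig
const std = @import("std");

test "unmanaged array hash map, allocator passed per call" {
    const gpa = std.testing.allocator;
    var map: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
    defer map.deinit(gpa); // deinit takes the value too
    try map.put(gpa, 1, "one");
    try std.testing.expectEqualStrings("one", map.get(1).?);
}
```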
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 24049dad5c33..d88dae95ff73 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -42,12 +42,12 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize,
- allocator: *Allocator,
+ allocator: Allocator,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.capacity = 0,
@@ -58,7 +58,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Initialize with capacity to hold at least `num` elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+ pub fn initCapacity(allocator: Allocator, num: usize) !Self {
var self = Self.init(allocator);
try self.ensureTotalCapacityPrecise(num);
return self;
@@ -74,7 +74,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self {
return Self{
.items = slice,
.capacity = slice.len,
@@ -457,33 +457,33 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Initialize with capacity to hold at least num elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+ pub fn initCapacity(allocator: Allocator, num: usize) !Self {
var self = Self{};
try self.ensureTotalCapacityPrecise(allocator, num);
return self;
}
/// Release all allocated memory.
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.* = undefined;
}
/// Convert this list into an analogous memory-managed one.
/// The returned list has ownership of the underlying memory.
- pub fn toManaged(self: *Self, allocator: *Allocator) ArrayListAligned(T, alignment) {
+ pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
}
/// The caller owns the returned memory. ArrayList becomes empty.
- pub fn toOwnedSlice(self: *Self, allocator: *Allocator) Slice {
+ pub fn toOwnedSlice(self: *Self, allocator: Allocator) Slice {
const result = allocator.shrink(self.allocatedSlice(), self.items.len);
self.* = Self{};
return result;
}
/// The caller owns the returned memory. ArrayList becomes empty.
- pub fn toOwnedSliceSentinel(self: *Self, allocator: *Allocator, comptime sentinel: T) ![:sentinel]T {
+ pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) ![:sentinel]T {
try self.append(allocator, sentinel);
const result = self.toOwnedSlice(allocator);
return result[0 .. result.len - 1 :sentinel];
@@ -492,7 +492,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Insert `item` at index `n`. Moves `list[n .. list.len]`
/// to higher indices to make room.
/// This operation is O(N).
- pub fn insert(self: *Self, allocator: *Allocator, n: usize, item: T) !void {
+ pub fn insert(self: *Self, allocator: Allocator, n: usize, item: T) !void {
try self.ensureUnusedCapacity(allocator, 1);
self.items.len += 1;
@@ -503,7 +503,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to
/// higher indices to make room.
/// This operation is O(N).
- pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: []const T) !void {
+ pub fn insertSlice(self: *Self, allocator: Allocator, i: usize, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.items.len += items.len;
@@ -515,14 +515,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Grows list if `len < new_items.len`.
/// Shrinks list if `len > new_items.len`
/// Invalidates pointers if this ArrayList is resized.
- pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: []const T) !void {
+ pub fn replaceRange(self: *Self, allocator: Allocator, start: usize, len: usize, new_items: []const T) !void {
var managed = self.toManaged(allocator);
try managed.replaceRange(start, len, new_items);
self.* = managed.moveToUnmanaged();
}
/// Extend the list by 1 element. Allocates more memory as necessary.
- pub fn append(self: *Self, allocator: *Allocator, item: T) !void {
+ pub fn append(self: *Self, allocator: Allocator, item: T) !void {
const new_item_ptr = try self.addOne(allocator);
new_item_ptr.* = item;
}
@@ -563,7 +563,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
- pub fn appendSlice(self: *Self, allocator: *Allocator, items: []const T) !void {
+ pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) !void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendSliceAssumeCapacity(items);
}
@@ -580,7 +580,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
pub const WriterContext = struct {
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
};
pub const Writer = if (T != u8)
@@ -590,7 +590,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
std.io.Writer(WriterContext, error{OutOfMemory}, appendWrite);
/// Initializes a Writer which will append to the list.
- pub fn writer(self: *Self, allocator: *Allocator) Writer {
+ pub fn writer(self: *Self, allocator: Allocator) Writer {
return .{ .context = .{ .self = self, .allocator = allocator } };
}
@@ -603,7 +603,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
- pub fn appendNTimes(self: *Self, allocator: *Allocator, value: T, n: usize) !void {
+ pub fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) !void {
const old_len = self.items.len;
try self.resize(allocator, self.items.len + n);
mem.set(T, self.items[old_len..self.items.len], value);
@@ -621,13 +621,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
- pub fn resize(self: *Self, allocator: *Allocator, new_len: usize) !void {
+ pub fn resize(self: *Self, allocator: Allocator, new_len: usize) !void {
try self.ensureTotalCapacity(allocator, new_len);
self.items.len = new_len;
}
/// Reduce allocated capacity to `new_len`.
- pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+ pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
assert(new_len <= self.items.len);
self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {
@@ -653,7 +653,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}
/// Invalidates all element pointers.
- pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ pub fn clearAndFree(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.items.len = 0;
self.capacity = 0;
@@ -663,7 +663,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Modify the array so that it can hold at least `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
@@ -679,7 +679,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Like `ensureTotalCapacity`, but the resulting capacity is much more likely
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureTotalCapacityPrecise(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) !void {
if (self.capacity >= new_capacity) return;
const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);
@@ -691,7 +691,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Invalidates pointers if additional memory is needed.
pub fn ensureUnusedCapacity(
self: *Self,
- allocator: *Allocator,
+ allocator: Allocator,
additional_count: usize,
) !void {
return self.ensureTotalCapacity(allocator, self.items.len + additional_count);
@@ -706,7 +706,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list resized.
- pub fn addOne(self: *Self, allocator: *Allocator) !*T {
+ pub fn addOne(self: *Self, allocator: Allocator) !*T {
const newlen = self.items.len + 1;
try self.ensureTotalCapacity(allocator, newlen);
return self.addOneAssumeCapacity();
@@ -726,7 +726,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
- pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T {
+ pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) !*[n]T {
const prev_len = self.items.len;
try self.resize(allocator, self.items.len + n);
return self.items[prev_len..][0..n];
@@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const a = &arena.allocator;
+ const a = arena.getAllocator();
const init = [_]i32{ 1, 2, 3, 4, 5 };
const new = [_]i32{ 0, 0, 0 };
@@ -1263,7 +1263,7 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe
// use an arena allocator to make sure realloc returns error.OutOfMemory
var arena = std.heap.ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const a = &arena.allocator;
+ const a = arena.getAllocator();
{
var list = ArrayList(i32).init(a);
@@ -1361,7 +1361,7 @@ test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" {
test "std.ArrayList(u0)" {
// An ArrayList on zero-sized types should not need to allocate
- const a = &testing.FailingAllocator.init(testing.allocator, 0).allocator;
+ const a = testing.FailingAllocator.init(testing.allocator, 0).getAllocator();
var list = ArrayList(u0).init(a);
defer list.deinit();
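The list types follow the same split: managed `ArrayList` stores the `Allocator` value in its `allocator` field, while the unmanaged variant threads it through each call. A small sketch of the unmanaged usage:

```zig
const std = @import("std");

test "ArrayListUnmanaged with a by-value allocator" {
    const gpa = std.testing.allocator;
    var list: std.ArrayListUnmanaged(u8) = .{};
    defer list.deinit(gpa);
    try list.appendSlice(gpa, "hello");
    try std.testing.expectEqualStrings("hello", list.items);
}
```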
diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig
index c999162b3643..8174361800f5 100644
--- a/lib/std/ascii.zig
+++ b/lib/std/ascii.zig
@@ -301,7 +301,7 @@ test "lowerString" {
/// Allocates a lower case copy of `ascii_string`.
/// Caller owns returned string and must free with `allocator`.
-pub fn allocLowerString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocLowerString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
const result = try allocator.alloc(u8, ascii_string.len);
return lowerString(result, ascii_string);
}
@@ -330,7 +330,7 @@ test "upperString" {
/// Allocates an upper case copy of `ascii_string`.
/// Caller owns returned string and must free with `allocator`.
-pub fn allocUpperString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocUpperString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
const result = try allocator.alloc(u8, ascii_string.len);
return upperString(result, ascii_string);
}
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 62de8d9f10dc..3b4a14110c13 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -156,7 +156,7 @@ pub fn Queue(comptime T: type) type {
}
const Context = struct {
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
@@ -176,8 +176,8 @@ test "std.atomic.Queue" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
- var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+ var a = fixed_buffer_allocator.getThreadSafeAllocator();
var queue = Queue(i32).init();
var context = Context{
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 35f691425224..c1b368b57191 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -69,7 +69,7 @@ pub fn Stack(comptime T: type) type {
}
const Context = struct {
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
stack: *Stack(i32),
put_sum: isize,
get_sum: isize,
@@ -88,8 +88,8 @@ test "std.atomic.stack" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
- var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+ var a = fixed_buffer_allocator.getThreadSafeAllocator();
var stack = Stack(i32).init();
var context = Context{
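Both atomic-container tests drop the removed `ThreadSafeFixedBufferAllocator` in favor of a thread-safe view over a plain `FixedBufferAllocator`. A sketch, with `getThreadSafeAllocator` being the accessor name used in the hunks above (an API of this branch, not of earlier releases):

```zig
const std = @import("std");

test "thread-safe view over one fixed buffer" {
    var buf: [4096]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    // Same backing memory; the returned vtable serializes allocations.
    const a = fba.getThreadSafeAllocator();
    const block = try a.alloc(u8, 64);
    a.free(block);
}
```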
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
index af960784f76e..2848305819f2 100644
--- a/lib/std/bit_set.zig
+++ b/lib/std/bit_set.zig
@@ -476,7 +476,7 @@ pub const DynamicBitSetUnmanaged = struct {
/// Creates a bit set with no elements present.
/// If bit_length is not zero, deinit must eventually be called.
- pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
var self = Self{};
try self.resize(bit_length, false, allocator);
return self;
@@ -484,7 +484,7 @@ pub const DynamicBitSetUnmanaged = struct {
/// Creates a bit set with all elements present.
/// If bit_length is not zero, deinit must eventually be called.
- pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
var self = Self{};
try self.resize(bit_length, true, allocator);
return self;
@@ -493,7 +493,7 @@ pub const DynamicBitSetUnmanaged = struct {
/// Resizes to a new bit_length. If the new length is larger
/// than the old length, fills any added bits with `fill`.
/// If new_len is not zero, deinit must eventually be called.
- pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void {
+ pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: Allocator) !void {
const old_len = self.bit_length;
const old_masks = numMasks(old_len);
@@ -556,12 +556,12 @@ pub const DynamicBitSetUnmanaged = struct {
/// deinitializes the array and releases its memory.
/// The passed allocator must be the same one used for
/// init* or resize in the past.
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
self.resize(0, false, allocator) catch unreachable;
}
/// Creates a duplicate of this bit set, using the new allocator.
- pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
const num_masks = numMasks(self.bit_length);
var copy = Self{};
try copy.resize(self.bit_length, false, new_allocator);
@@ -742,13 +742,13 @@ pub const DynamicBitSet = struct {
pub const ShiftInt = std.math.Log2Int(MaskInt);
/// The allocator used by this bit set
- allocator: *Allocator,
+ allocator: Allocator,
/// The number of valid items in this bit set
unmanaged: DynamicBitSetUnmanaged = .{},
/// Creates a bit set with no elements present.
- pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
return Self{
.unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator),
.allocator = allocator,
@@ -756,7 +756,7 @@ pub const DynamicBitSet = struct {
}
/// Creates a bit set with all elements present.
- pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+ pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
return Self{
.unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator),
.allocator = allocator,
@@ -777,7 +777,7 @@ pub const DynamicBitSet = struct {
}
/// Creates a duplicate of this bit set, using the new allocator.
- pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+ pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
return Self{
.unmanaged = try self.unmanaged.clone(new_allocator),
.allocator = new_allocator,
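The dynamic bit sets follow suit; note that on this branch `initEmpty` still takes `(bit_length, allocator)` in that order, as the hunks above show. A sketch of the managed variant:

```zig
const std = @import("std");

test "DynamicBitSet with the value interface" {
    var bits = try std.bit_set.DynamicBitSet.initEmpty(128, std.testing.allocator);
    defer bits.deinit(); // the managed variant remembers its Allocator value
    bits.set(42);
    try std.testing.expect(bits.isSet(42));
}
```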
diff --git a/lib/std/buf_map.zig b/lib/std/buf_map.zig
index 1e4462e6aee4..5b26ae9684e9 100644
--- a/lib/std/buf_map.zig
+++ b/lib/std/buf_map.zig
@@ -14,7 +14,7 @@ pub const BufMap = struct {
/// Create a BufMap backed by a specific allocator.
/// That allocator will be used for both backing allocations
/// and string deduplication.
- pub fn init(allocator: *Allocator) BufMap {
+ pub fn init(allocator: Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
diff --git a/lib/std/buf_set.zig b/lib/std/buf_set.zig
index ce2d51b0569c..e68b24fbcc13 100644
--- a/lib/std/buf_set.zig
+++ b/lib/std/buf_set.zig
@@ -16,7 +16,7 @@ pub const BufSet = struct {
/// Create a BufSet using an allocator. The allocator will
/// be used internally for both backing allocations and
/// string duplication.
- pub fn init(a: *Allocator) BufSet {
+ pub fn init(a: Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
@@ -67,7 +67,7 @@ pub const BufSet = struct {
}
/// Get the allocator used by this set
- pub fn allocator(self: *const BufSet) *Allocator {
+ pub fn allocator(self: *const BufSet) Allocator {
return self.hash_map.allocator;
}
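`BufSet.allocator()` now returns the stored value rather than a pointer into the backing map, so callers can copy it freely. A hedged usage sketch:

```zig
const std = @import("std");

test "BufSet hands back its allocator by value" {
    var set = std.BufSet.init(std.testing.allocator);
    defer set.deinit();
    try set.insert("zig");
    const a = set.allocator(); // a plain value now, not *Allocator
    const dup = try a.dupe(u8, "build");
    defer a.free(dup);
}
```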
diff --git a/lib/std/build.zig b/lib/std/build.zig
index 3f6c7aa94340..dba27f86b9c5 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -28,7 +28,7 @@ pub const OptionsStep = @import("build/OptionsStep.zig");
pub const Builder = struct {
install_tls: TopLevelStep,
uninstall_tls: TopLevelStep,
- allocator: *Allocator,
+ allocator: Allocator,
user_input_options: UserInputOptionsMap,
available_options_map: AvailableOptionsMap,
available_options_list: ArrayList(AvailableOption),
@@ -134,7 +134,7 @@ pub const Builder = struct {
};
pub fn create(
- allocator: *Allocator,
+ allocator: Allocator,
zig_exe: []const u8,
build_root: []const u8,
cache_root: []const u8,
@@ -1285,7 +1285,7 @@ test "builder.findProgram compiles" {
defer arena.deinit();
const builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"zig",
"zig-cache",
"zig-cache",
@@ -3077,7 +3077,7 @@ pub const Step = struct {
custom,
};
- pub fn init(id: Id, name: []const u8, allocator: *Allocator, makeFn: fn (*Step) anyerror!void) Step {
+ pub fn init(id: Id, name: []const u8, allocator: Allocator, makeFn: fn (*Step) anyerror!void) Step {
return Step{
.id = id,
.name = allocator.dupe(u8, name) catch unreachable,
@@ -3087,7 +3087,7 @@ pub const Step = struct {
.done_flag = false,
};
}
- pub fn initNoOp(id: Id, name: []const u8, allocator: *Allocator) Step {
+ pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
return init(id, name, allocator, makeNoOp);
}
@@ -3114,7 +3114,7 @@ pub const Step = struct {
}
};
-fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+fn doAtomicSymLinks(allocator: Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
const out_dir = fs.path.dirname(output_path) orelse ".";
const out_basename = fs.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
@@ -3138,7 +3138,7 @@ fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_maj
}
/// Returned slice must be freed by the caller.
-fn findVcpkgRoot(allocator: *Allocator) !?[]const u8 {
+fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
const appdata_path = try fs.getAppDataDir(allocator, "vcpkg");
defer allocator.free(appdata_path);
@@ -3207,7 +3207,7 @@ test "Builder.dupePkg()" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"test",
"test",
"test",
@@ -3252,7 +3252,7 @@ test "LibExeObjStep.addPackage" {
defer arena.deinit();
var builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"test",
"test",
"test",
diff --git a/lib/std/build/InstallRawStep.zig b/lib/std/build/InstallRawStep.zig
index d87ff2fffdde..0f921d6622c5 100644
--- a/lib/std/build/InstallRawStep.zig
+++ b/lib/std/build/InstallRawStep.zig
@@ -40,7 +40,7 @@ const BinaryElfOutput = struct {
self.segments.deinit();
}
- pub fn parse(allocator: *Allocator, elf_file: File) !Self {
+ pub fn parse(allocator: Allocator, elf_file: File) !Self {
var self: Self = .{
.segments = ArrayList(*BinaryElfSegment).init(allocator),
.sections = ArrayList(*BinaryElfSection).init(allocator),
@@ -298,7 +298,7 @@ fn containsValidAddressRange(segments: []*BinaryElfSegment) bool {
return true;
}
-fn emitRaw(allocator: *Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
+fn emitRaw(allocator: Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
var elf_file = try fs.cwd().openFile(elf_path, .{});
defer elf_file.close();
diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig
index dfe512adecb9..d3ac0d419628 100644
--- a/lib/std/build/OptionsStep.zig
+++ b/lib/std/build/OptionsStep.zig
@@ -274,7 +274,7 @@ test "OptionsStep" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var builder = try Builder.create(
- &arena.allocator,
+ arena.getAllocator(),
"test",
"test",
"test",
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index e7cf56f39d6b..e0acc237d946 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -75,7 +75,7 @@ pub const StackTrace = struct {
};
const tty_config = std.debug.detectTTYConfig();
try writer.writeAll("\n");
- std.debug.writeStackTrace(self, writer, &arena.allocator, debug_info, tty_config) catch |err| {
+ std.debug.writeStackTrace(self, writer, arena.getAllocator(), debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
};
try writer.writeAll("\n");
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 2e1dfad00a95..cc9f1b28018d 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -23,7 +23,7 @@ pub const ChildProcess = struct {
handle: if (builtin.os.tag == .windows) windows.HANDLE else void,
thread_handle: if (builtin.os.tag == .windows) windows.HANDLE else void,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
stdin: ?File,
stdout: ?File,
@@ -90,7 +90,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
- pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
+ pub fn init(argv: []const []const u8, allocator: mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
child.* = ChildProcess{
.allocator = allocator,
@@ -329,7 +329,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
pub fn exec(args: struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
argv: []const []const u8,
cwd: ?[]const u8 = null,
cwd_dir: ?fs.Dir = null,
@@ -541,7 +541,7 @@ pub const ChildProcess = struct {
var arena_allocator = std.heap.ArenaAllocator.init(self.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
// The POSIX standard does not allow malloc() between fork() and execve(),
// and `self.allocator` may be a libc allocator.
@@ -931,7 +931,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1
}
/// Caller must dealloc.
-fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![:0]u8 {
+fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8) ![:0]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
@@ -1081,7 +1081,7 @@ fn readIntFd(fd: i32) !ErrInt {
}
/// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 {
+pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const BufMap) ![]u16 {
// count bytes needed
const max_chars_needed = x: {
var max_chars_needed: usize = 4; // 4 for the final 4 null bytes
@@ -1117,7 +1117,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
return allocator.shrink(result, i);
}
-pub fn createNullDelimitedEnvMap(arena: *mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
+pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
const envp_count = env_map.count();
const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null);
{
@@ -1149,7 +1149,7 @@ test "createNullDelimitedEnvMap" {
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
- const environ = try createNullDelimitedEnvMap(&arena.allocator, &envmap);
+ const environ = try createNullDelimitedEnvMap(arena.getAllocator(), &envmap);
try testing.expectEqual(@as(usize, 5), environ.len);
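For `ChildProcess.exec`, the options struct now carries the `Allocator` value directly. A hedged usage sketch (the wrapper function is illustrative):

```zig
const std = @import("std");

fn runEcho(gpa: std.mem.Allocator) !void {
    const result = try std.ChildProcess.exec(.{
        .allocator = gpa,
        .argv = &[_][]const u8{ "echo", "hello" },
    });
    // Caller owns both output buffers on success.
    defer gpa.free(result.stdout);
    defer gpa.free(result.stderr);
}
```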
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index 961cd8ade6de..2bf0b1c44e35 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -98,7 +98,7 @@ pub const CoffError = error{
// Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
pub const Coff = struct {
in_file: File,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
coff_header: CoffHeader,
pe_header: OptionalHeader,
@@ -107,7 +107,7 @@ pub const Coff = struct {
guid: [16]u8,
age: u32,
- pub fn init(allocator: *mem.Allocator, in_file: File) Coff {
+ pub fn init(allocator: mem.Allocator, in_file: File) Coff {
return Coff{
.in_file = in_file,
.allocator = allocator,
@@ -324,7 +324,7 @@ pub const Coff = struct {
}
// Return an owned slice full of the section data
- pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: *mem.Allocator) ![]u8 {
+ pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 {
const sec = for (self.sections.items) |*sec| {
if (mem.eql(u8, sec.header.name[0..name.len], name)) {
break sec;
diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig
index 497b07d905e2..491b888812fc 100644
--- a/lib/std/compress/gzip.zig
+++ b/lib/std/compress/gzip.zig
@@ -24,7 +24,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
error{ CorruptedData, WrongChecksum };
pub const Reader = io.Reader(*Self, Error, read);
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
inflater: deflate.InflateStream(ReaderType),
in_reader: ReaderType,
hasher: std.hash.Crc32,
@@ -37,7 +37,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
modification_time: u32,
},
- fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+ fn init(allocator: mem.Allocator, source: ReaderType) !Self {
// gzip header format is specified in RFC1952
const header = try source.readBytesNoEof(10);
@@ -152,7 +152,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
};
}
-pub fn gzipStream(allocator: *mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
+pub fn gzipStream(allocator: mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
return GzipStream(@TypeOf(reader)).init(allocator, reader);
}
diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig
index f0f4ca2ff4d9..09d9c18e7248 100644
--- a/lib/std/compress/zlib.zig
+++ b/lib/std/compress/zlib.zig
@@ -17,13 +17,13 @@ pub fn ZlibStream(comptime ReaderType: type) type {
error{ WrongChecksum, Unsupported };
pub const Reader = io.Reader(*Self, Error, read);
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
inflater: deflate.InflateStream(ReaderType),
in_reader: ReaderType,
hasher: std.hash.Adler32,
window_slice: []u8,
- fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+ fn init(allocator: mem.Allocator, source: ReaderType) !Self {
// Zlib header format is specified in RFC1950
const header = try source.readBytesNoEof(2);
@@ -88,7 +88,7 @@ pub fn ZlibStream(comptime ReaderType: type) type {
};
}
-pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
+pub fn zlibStream(allocator: mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
return ZlibStream(@TypeOf(reader)).init(allocator, reader);
}
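The decompressor constructors take the value type as well. A sketch that inflates a gzip stream fully into memory, assuming the era's `readAllAlloc(allocator, max_size)` reader helper:

```zig
const std = @import("std");

// Sketch: the allocator is passed by value both to the decompressor
// and to the reader helper that collects the output.
fn gunzipAll(gpa: std.mem.Allocator, reader: anytype) ![]u8 {
    var stream = try std.compress.gzip.gzipStream(gpa, reader);
    defer stream.deinit();
    return stream.reader().readAllAlloc(gpa, std.math.maxInt(usize));
}
```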
diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig
index 66cd8b38f117..493f36ca94c0 100644
--- a/lib/std/crypto/argon2.zig
+++ b/lib/std/crypto/argon2.zig
@@ -201,7 +201,7 @@ fn initBlocks(
}
fn processBlocks(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,
@@ -240,7 +240,7 @@ fn processBlocksSt(
}
fn processBlocksMt(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
blocks: *Blocks,
time: u32,
memory: u32,
@@ -480,7 +480,7 @@ fn indexAlpha(
///
/// Salt has to be at least 8 bytes length.
pub fn kdf(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
derived_key: []u8,
password: []const u8,
salt: []const u8,
@@ -524,7 +524,7 @@ const PhcFormatHasher = struct {
};
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
password: []const u8,
params: Params,
mode: Mode,
@@ -550,7 +550,7 @@ const PhcFormatHasher = struct {
}
pub fn verify(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {
@@ -579,7 +579,7 @@ const PhcFormatHasher = struct {
///
/// Only phc encoding is supported.
pub const HashOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
params: Params,
mode: Mode = .argon2id,
encoding: pwhash.Encoding = .phc,
@@ -609,7 +609,7 @@ pub fn strHash(
///
/// Allocator is required for argon2.
pub const VerifyOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
};
/// Verify that a previously computed hash is valid for a given password.
diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig
index d8c4d67453c7..bd3c9ca7d4e7 100644
--- a/lib/std/crypto/bcrypt.zig
+++ b/lib/std/crypto/bcrypt.zig
@@ -368,7 +368,7 @@ const CryptFormatHasher = struct {
/// Options for hashing a password.
pub const HashOptions = struct {
- allocator: ?*mem.Allocator = null,
+ allocator: ?mem.Allocator = null,
params: Params,
encoding: pwhash.Encoding,
};
@@ -394,7 +394,7 @@ pub fn strHash(
/// Options for hash verification.
pub const VerifyOptions = struct {
- allocator: ?*mem.Allocator = null,
+ allocator: ?mem.Allocator = null,
};
/// Verify that a previously computed hash is valid for a given password.
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index 4836de032e0d..52e56ddf18be 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -363,7 +363,7 @@ pub fn main() !void {
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
- const args = try std.process.argsAlloc(&fixed.allocator);
+ const args = try std.process.argsAlloc(fixed.getAllocator());
var filter: ?[]u8 = "";
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index b17952dcd6a9..e464cca28eaa 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -161,7 +161,7 @@ pub const Params = struct {
///
/// scrypt is defined in RFC 7914.
///
-/// allocator: *mem.Allocator.
+/// allocator: mem.Allocator.
///
/// derived_key: Slice of appropriate size for generated key. Generally 16 or 32 bytes in length.
/// May be uninitialized. All bytes will be overwritten.
@@ -173,7 +173,7 @@ pub const Params = struct {
///
/// params: Params.
pub fn kdf(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
derived_key: []u8,
password: []const u8,
salt: []const u8,
@@ -406,7 +406,7 @@ const PhcFormatHasher = struct {
/// Return a non-deterministic hash of the password encoded as a PHC-format string
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
password: []const u8,
params: Params,
buf: []u8,
@@ -429,7 +429,7 @@ const PhcFormatHasher = struct {
/// Verify a password against a PHC-format encoded string
pub fn verify(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {
@@ -455,7 +455,7 @@ const CryptFormatHasher = struct {
/// Return a non-deterministic hash of the password encoded into the modular crypt format
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
password: []const u8,
params: Params,
buf: []u8,
@@ -478,7 +478,7 @@ const CryptFormatHasher = struct {
/// Verify a password against a string in modular crypt format
pub fn verify(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
str: []const u8,
password: []const u8,
) HasherError!void {
@@ -497,7 +497,7 @@ const CryptFormatHasher = struct {
///
/// Allocator is required for scrypt.
pub const HashOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
params: Params,
encoding: pwhash.Encoding,
};
@@ -520,7 +520,7 @@ pub fn strHash(
///
/// Allocator is required for scrypt.
pub const VerifyOptions = struct {
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
};
/// Verify that a previously computed hash is valid for a given password.
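The password-hashing option structs now embed `?mem.Allocator`, so call sites pass the value (or null) directly. A sketch against the scrypt API shown above, assuming `Params.interactive` as the preset:

```zig
const std = @import("std");

fn hashPassword(gpa: std.mem.Allocator, out: []u8, password: []const u8) ![]const u8 {
    const scrypt = std.crypto.pwhash.scrypt;
    return scrypt.strHash(password, .{
        .allocator = gpa, // a value now, not ?*mem.Allocator
        .params = scrypt.Params.interactive,
        .encoding = .phc,
    }, out);
}
```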
diff --git a/lib/std/cstr.zig b/lib/std/cstr.zig
index 64beb378d991..068fc419ac57 100644
--- a/lib/std/cstr.zig
+++ b/lib/std/cstr.zig
@@ -33,7 +33,7 @@ fn testCStrFnsImpl() !void {
/// Returns a mutable, null-terminated slice with the same length as `slice`.
/// Caller owns the returned memory.
-pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![:0]u8 {
+pub fn addNullByte(allocator: mem.Allocator, slice: []const u8) ![:0]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;
@@ -48,13 +48,13 @@ test "addNullByte" {
}
pub const NullTerminated2DArray = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
byte_count: usize,
ptr: ?[*:null]?[*:0]u8,
/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
- pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
+ pub fn fromSlices(allocator: mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 531872581aa1..b6990d675dc1 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -29,7 +29,7 @@ pub const LineInfo = struct {
line: u64,
column: u64,
file_name: []const u8,
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
pub fn deinit(self: LineInfo) void {
const allocator = self.allocator orelse return;
@@ -339,7 +339,7 @@ const RESET = "\x1b[0m";
pub fn writeStackTrace(
stack_trace: std.builtin.StackTrace,
out_stream: anytype,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
debug_info: *DebugInfo,
tty_config: TTY.Config,
) !void {
@@ -662,7 +662,7 @@ pub const OpenSelfDebugInfoError = error{
};
/// TODO resources https://github.com/ziglang/zig/issues/4353
-pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
+pub fn openSelfDebugInfo(allocator: mem.Allocator) anyerror!DebugInfo {
nosuspend {
if (builtin.strip_debug_info)
return error.MissingDebugInfo;
@@ -688,7 +688,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
/// it themselves, even on error.
/// TODO resources https://github.com/ziglang/zig/issues/4353
/// TODO it's weird to take ownership even on error, rework this code.
-fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInfo {
+fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo {
nosuspend {
errdefer coff_file.close();
@@ -755,7 +755,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 {
/// it themselves, even on error.
/// TODO resources https://github.com/ziglang/zig/issues/4353
/// TODO it's weird to take ownership even on error, rework this code.
-pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugInfo {
+pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo {
nosuspend {
const mapped_mem = try mapWholeFile(elf_file);
const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
@@ -827,7 +827,7 @@ pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugI
/// This takes ownership of macho_file: users of this function should not close
/// it themselves, even on error.
/// TODO it's weird to take ownership even on error, rework this code.
-fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugInfo {
+fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo {
const mapped_mem = try mapWholeFile(macho_file);
const hdr = @ptrCast(
@@ -1025,10 +1025,10 @@ fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
}
pub const DebugInfo = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
address_map: std.AutoHashMap(usize, *ModuleDebugInfo),
- pub fn init(allocator: *mem.Allocator) DebugInfo {
+ pub fn init(allocator: mem.Allocator) DebugInfo {
return DebugInfo{
.allocator = allocator,
.address_map = std.AutoHashMap(usize, *ModuleDebugInfo).init(allocator),
@@ -1278,7 +1278,7 @@ pub const ModuleDebugInfo = switch (native_os) {
addr_table: std.StringHashMap(u64),
};
- pub fn allocator(self: @This()) *mem.Allocator {
+ pub fn allocator(self: @This()) mem.Allocator {
return self.ofiles.allocator;
}
@@ -1470,7 +1470,7 @@ pub const ModuleDebugInfo = switch (native_os) {
debug_data: PdbOrDwarf,
coff: *coff.Coff,
- pub fn allocator(self: @This()) *mem.Allocator {
+ pub fn allocator(self: @This()) mem.Allocator {
return self.coff.allocator;
}
@@ -1560,14 +1560,15 @@ fn getSymbolFromDwarf(address: u64, di: *DW.DwarfInfo) !SymbolInfo {
}
/// TODO multithreaded awareness
-var debug_info_allocator: ?*mem.Allocator = null;
+var debug_info_allocator: ?mem.Allocator = null;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
-fn getDebugInfoAllocator() *mem.Allocator {
+fn getDebugInfoAllocator() mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- debug_info_allocator = &debug_info_arena_allocator.allocator;
- return &debug_info_arena_allocator.allocator;
+ const allocator = debug_info_arena_allocator.getAllocator();
+ debug_info_allocator = allocator;
+ return allocator;
}
/// Whether or not the current target can print useful debug information when a segfault occurs.
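The `getDebugInfoAllocator` hunk illustrates why the old scheme forced expressions like `&debug_info_arena_allocator.allocator`: the interface used to alias a field inside the arena. With the value type, the cached singleton becomes an optional value. The same pattern, reduced to a sketch:

```zig
const std = @import("std");

// Lazy-singleton pattern from the hunk above: the cached interface is
// stored as an optional value, not a pointer into the arena.
var debug_arena: std.heap.ArenaAllocator = undefined;
var debug_allocator: ?std.mem.Allocator = null;

fn getAllocatorOnce() std.mem.Allocator {
    if (debug_allocator) |a| return a;
    debug_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    const a = debug_arena.getAllocator();
    debug_allocator = a;
    return a;
}
```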
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig
index 26031be662c3..eb204d15eef0 100644
--- a/lib/std/dwarf.zig
+++ b/lib/std/dwarf.zig
@@ -466,7 +466,7 @@ fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool)
}
// TODO the nosuspends here are workarounds
-fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
+fn readAllocBytes(allocator: mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile;
@@ -481,18 +481,18 @@ fn readAddress(in_stream: anytype, endian: std.builtin.Endian, is_64: bool) !u64
@as(u64, try in_stream.readInt(u32, endian));
}
-fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: mem.Allocator, in_stream: anytype, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}
// TODO the nosuspends here are workarounds
-fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
const block_len = try nosuspend in_stream.readVarInt(usize, endian, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
-fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
+fn parseFormValueConstant(allocator: mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
_ = allocator;
// TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
// `nosuspend` should be removed from all the function calls once it is fixed.
@@ -520,7 +520,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:
}
// TODO the nosuspends here are workarounds
-fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
+fn parseFormValueRef(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
_ = allocator;
return FormValue{
.Ref = switch (size) {
@@ -535,7 +535,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.
}
// TODO the nosuspends here are workarounds
-fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
+fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
return switch (form_id) {
FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
@@ -604,7 +604,7 @@ pub const DwarfInfo = struct {
compile_unit_list: ArrayList(CompileUnit) = undefined,
func_list: ArrayList(Func) = undefined,
- pub fn allocator(self: DwarfInfo) *mem.Allocator {
+ pub fn allocator(self: DwarfInfo) mem.Allocator {
return self.abbrev_table_list.allocator;
}
@@ -1092,7 +1092,7 @@ pub const DwarfInfo = struct {
/// the DwarfInfo fields before calling. These fields can be left undefined:
/// * abbrev_table_list
/// * compile_unit_list
-pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void {
+pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
di.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator);
di.compile_unit_list = ArrayList(CompileUnit).init(allocator);
di.func_list = ArrayList(Func).init(allocator);
diff --git a/lib/std/event/group.zig b/lib/std/event/group.zig
index 599e8d9496ba..85eeeaf2b7c3 100644
--- a/lib/std/event/group.zig
+++ b/lib/std/event/group.zig
@@ -15,7 +15,7 @@ pub fn Group(comptime ReturnType: type) type {
frame_stack: Stack,
alloc_stack: AllocStack,
lock: Lock,
- allocator: *Allocator,
+ allocator: Allocator,
const Self = @This();
@@ -31,7 +31,7 @@ pub fn Group(comptime ReturnType: type) type {
handle: anyframe->ReturnType,
};
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.frame_stack = Stack.init(),
.alloc_stack = AllocStack.init(),
@@ -127,7 +127,7 @@ test "std.event.Group" {
_ = async testGroup(std.heap.page_allocator);
}
-fn testGroup(allocator: *Allocator) callconv(.Async) void {
+fn testGroup(allocator: Allocator) callconv(.Async) void {
var count: usize = 0;
var group = Group(void).init(allocator);
var sleep_a_little_frame = async sleepALittle(&count);
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 042c8bc3ccbb..413b23cd48af 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -727,7 +727,7 @@ pub const Loop = struct {
/// with `allocator` and freed when the function returns.
/// `func` must return void and it can be an async function.
/// Yields to the event loop, running the function on the next tick.
- pub fn runDetached(self: *Loop, alloc: *mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
+ pub fn runDetached(self: *Loop, alloc: mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
if (!std.io.is_async) @compileError("Can't use runDetached in non-async mode!");
if (@TypeOf(@call(.{}, func, args)) != void) {
@compileError("`func` must not have a return value");
@@ -735,7 +735,7 @@ pub const Loop = struct {
const Wrapper = struct {
const Args = @TypeOf(args);
- fn run(func_args: Args, loop: *Loop, allocator: *mem.Allocator) void {
+ fn run(func_args: Args, loop: *Loop, allocator: mem.Allocator) void {
loop.beginOneEvent();
loop.yield();
@call(.{}, func, func_args); // compile error when called with non-void ret type
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index fd42842a3a97..c19330d5a922 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -226,7 +226,7 @@ test "std.event.RwLock" {
const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
try testing.expectEqualSlices(i32, expected_result, shared_test_data);
}
-fn testLock(allocator: *Allocator, lock: *RwLock) callconv(.Async) void {
+fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
var read_nodes: [100]Loop.NextTickNode = undefined;
for (read_nodes) |*read_node| {
const frame = allocator.create(@Frame(readRunner)) catch @panic("memory");
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index d5b6285c00d3..b7c8f761d3dd 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -33,7 +33,7 @@ pub fn LinearFifo(
};
return struct {
- allocator: if (buffer_type == .Dynamic) *Allocator else void,
+ allocator: if (buffer_type == .Dynamic) Allocator else void,
buf: if (buffer_type == .Static) [buffer_type.Static]T else []T,
head: usize,
count: usize,
@@ -69,7 +69,7 @@ pub fn LinearFifo(
}
},
.Dynamic => struct {
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return .{
.allocator = allocator,
.buf = &[_]T{},
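For the dynamic FIFO only the parameter type changes; a usage sketch with the by-value allocator:

    const std = @import("std");

    test "LinearFifo(.Dynamic) takes the Allocator by value" {
        var fifo = std.fifo.LinearFifo(u8, .Dynamic).init(std.testing.allocator);
        defer fifo.deinit();

        try fifo.write("abc");
        var out: [3]u8 = undefined;
        const n = fifo.read(out[0..]);
        try std.testing.expect(n == 3);
        try std.testing.expect(std.mem.eql(u8, out[0..n], "abc"));
    }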
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 24f5daa09592..97dfcc78ba30 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -1803,7 +1803,7 @@ pub fn count(comptime fmt: []const u8, args: anytype) u64 {
pub const AllocPrintError = error{OutOfMemory};
-pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
+pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) {
// Output too long. Can't possibly allocate enough memory to display it.
error.Overflow => return error.OutOfMemory,
@@ -1816,7 +1816,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: any
pub const allocPrint0 = @compileError("deprecated; use allocPrintZ");
-pub fn allocPrintZ(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
+pub fn allocPrintZ(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
const result = try allocPrint(allocator, fmt ++ "\x00", args);
return result[0 .. result.len - 1 :0];
}
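Call sites of the reworked `allocPrint` change only in that no `&` is taken on the allocator; a short sketch:

    const std = @import("std");

    test "allocPrint with a by-value Allocator" {
        const allocator = std.testing.allocator;
        const msg = try std.fmt.allocPrint(allocator, "{d} + {d} = {d}", .{ 1, 2, 3 });
        defer allocator.free(msg);
        try std.testing.expectEqualStrings("1 + 2 = 3", msg);
    }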
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index c10ded3bdc2c..4d900d2e6756 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -64,7 +64,7 @@ pub const need_async_thread = std.io.is_async and switch (builtin.os.tag) {
};
/// TODO remove the allocator requirement from this API
-pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (cwd().symLink(existing_path, new_path, .{})) {
return;
} else |err| switch (err) {
@@ -875,7 +875,7 @@ pub const Dir = struct {
/// Must call `Walker.deinit` when done.
/// The order of returned file system entries is undefined.
/// `self` will not be closed after walking it.
- pub fn walk(self: Dir, allocator: *Allocator) !Walker {
+ pub fn walk(self: Dir, allocator: Allocator) !Walker {
var name_buffer = std.ArrayList(u8).init(allocator);
errdefer name_buffer.deinit();
@@ -1393,7 +1393,7 @@ pub const Dir = struct {
/// Same as `Dir.realpath` except caller must free the returned memory.
/// See also `Dir.realpath`.
- pub fn realpathAlloc(self: Dir, allocator: *Allocator, pathname: []const u8) ![]u8 {
+ pub fn realpathAlloc(self: Dir, allocator: Allocator, pathname: []const u8) ![]u8 {
// Use of MAX_PATH_BYTES here is valid as the realpath function does not
// have a variant that takes an arbitrary-size buffer.
// TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
@@ -1804,7 +1804,7 @@ pub const Dir = struct {
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
- pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
+ pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
}
@@ -1815,7 +1815,7 @@ pub const Dir = struct {
/// Allows specifying alignment and a sentinel value.
pub fn readFileAllocOptions(
self: Dir,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
file_path: []const u8,
max_bytes: usize,
size_hint: ?usize,
@@ -2464,7 +2464,7 @@ pub const SelfExePathError = os.ReadLinkError || os.SysCtlError || os.RealPathEr
/// `selfExePath` except allocates the result on the heap.
/// Caller owns returned memory.
-pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 {
+pub fn selfExePathAlloc(allocator: Allocator) ![]u8 {
// Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
// system, readlink will completely fail to return a result larger than
// PATH_MAX even if given a sufficiently large buffer. This makes it
@@ -2573,7 +2573,7 @@ pub fn selfExePathW() [:0]const u16 {
/// `selfExeDirPath` except allocates the result on the heap.
/// Caller owns returned memory.
-pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
+pub fn selfExeDirPathAlloc(allocator: Allocator) ![]u8 {
// Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
// system, readlink will completely fail to return a result larger than
// PATH_MAX even if given a sufficiently large buffer. This makes it
@@ -2596,7 +2596,7 @@ pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 {
/// `realpath`, except caller must free the returned memory.
/// See also `Dir.realpath`.
-pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
+pub fn realpathAlloc(allocator: Allocator, pathname: []const u8) ![]u8 {
// Use of MAX_PATH_BYTES here is valid as the realpath function does not
// have a variant that takes an arbitrary-size buffer.
// TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
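The `...Alloc` helpers above keep their caller-owns-memory contract; only the parameter type changes. A usage sketch:

    const std = @import("std");

    test "selfExePathAlloc returns caller-owned memory" {
        const allocator = std.testing.allocator;
        const exe_path = try std.fs.selfExePathAlloc(allocator);
        defer allocator.free(exe_path);
        try std.testing.expect(exe_path.len > 0);
    }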
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index 268de8f3c814..6fa46579fd49 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -420,7 +420,7 @@ pub const File = struct {
/// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
- pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 {
+ pub fn readToEndAlloc(self: File, allocator: mem.Allocator, max_bytes: usize) ![]u8 {
return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
}
@@ -432,7 +432,7 @@ pub const File = struct {
/// Allows specifying alignment and a sentinel value.
pub fn readToEndAllocOptions(
self: File,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
max_bytes: usize,
size_hint: ?usize,
comptime alignment: u29,
diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig
index 2501a5194b8b..e2a9c5438f7e 100644
--- a/lib/std/fs/get_app_data_dir.zig
+++ b/lib/std/fs/get_app_data_dir.zig
@@ -12,7 +12,7 @@ pub const GetAppDataDirError = error{
/// Caller owns returned memory.
/// TODO determine if we can remove the allocator requirement
-pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
+pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
switch (builtin.os.tag) {
.windows => {
var dir_path_ptr: [*:0]u16 = undefined;
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index 6372757d372e..323f974255d7 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -35,7 +35,7 @@ pub fn isSep(byte: u8) bool {
/// This is different from mem.join in that the separator will not be repeated if
/// it is found at the end or beginning of a pair of consecutive paths.
-fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
+fn joinSepMaybeZ(allocator: Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
if (paths.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
// Find first non-empty path index.
@@ -99,13 +99,13 @@ fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) boo
/// Naively combines a series of paths with the native path separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn join(allocator: Allocator, paths: []const []const u8) ![]u8 {
return joinSepMaybeZ(allocator, sep, isSep, paths, false);
}
/// Naively combines a series of paths with the native path separator and null terminator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn joinZ(allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
+pub fn joinZ(allocator: Allocator, paths: []const []const u8) ![:0]u8 {
const out = try joinSepMaybeZ(allocator, sep, isSep, paths, true);
return out[0 .. out.len - 1 :0];
}
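`join` and `joinZ` above both funnel into `joinSepMaybeZ`; calling them is unchanged apart from the allocator type. A sketch:

    const std = @import("std");

    test "fs.path.join allocates the combined path" {
        const allocator = std.testing.allocator;
        const p = try std.fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", "sub" });
        defer allocator.free(p);
        // Separators are native: '/' on POSIX, '\\' on Windows.
        try std.testing.expect(p.len > 0);
    }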
@@ -445,7 +445,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
}
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
-pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (native_os == .windows) {
return resolveWindows(allocator, paths);
} else {
@@ -461,7 +461,7 @@ pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd
return process.getCwdAlloc(allocator);
@@ -647,7 +647,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
/// If all paths are relative it uses the current working directory as a starting point.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(native_os != .windows); // resolvePosix called on windows can't use getCwd
return process.getCwdAlloc(allocator);
@@ -1058,7 +1058,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) !void {
/// resolve to the same path (after calling `resolve` on each), a zero-length
/// string is returned.
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
-pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relative(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
if (native_os == .windows) {
return relativeWindows(allocator, from, to);
} else {
@@ -1066,7 +1066,7 @@ pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
}
}
-pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolveWindows(allocator, &[_][]const u8{from});
defer allocator.free(resolved_from);
@@ -1139,7 +1139,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
return [_]u8{};
}
-pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolvePosix(allocator, &[_][]const u8{from});
defer allocator.free(resolved_from);
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index f2b584d6d465..437ff5620d7c 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -52,9 +52,11 @@ test "accessAbsolute" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
+
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
try fs.accessAbsolute(base_path, .{});
@@ -69,9 +71,11 @@ test "openDirAbsolute" {
try tmp.dir.makeDir("subdir");
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
+
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
{
@@ -80,8 +84,8 @@ test "openDirAbsolute" {
}
for ([_][]const u8{ ".", ".." }) |sub_path| {
- const dir_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, sub_path });
- defer arena.allocator.free(dir_path);
+ const dir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, sub_path });
+ defer allocator.free(dir_path);
var dir = try fs.openDirAbsolute(dir_path, .{});
defer dir.close();
}
@@ -107,12 +111,12 @@ test "readLinkAbsolute" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
- const allocator = &arena.allocator;
{
const target_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "file.txt" });
@@ -158,15 +162,16 @@ test "Dir.Iterator" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
- var entries = std.ArrayList(Dir.Entry).init(&arena.allocator);
+ var entries = std.ArrayList(Dir.Entry).init(allocator);
// Create iterator.
var iter = tmp_dir.dir.iterate();
while (try iter.next()) |entry| {
// We cannot just store `entry` because, on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
- const name = try arena.allocator.dupe(u8, entry.name);
+ const name = try allocator.dupe(u8, entry.name);
try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
}
@@ -202,25 +207,26 @@ test "Dir.realpath smoke test" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
// First, test non-alloc version
{
var buf1: [fs.MAX_PATH_BYTES]u8 = undefined;
const file_path = try tmp_dir.dir.realpath("test_file", buf1[0..]);
- const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+ const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
try testing.expect(mem.eql(u8, file_path, expected_path));
}
// Next, test alloc version
{
- const file_path = try tmp_dir.dir.realpathAlloc(&arena.allocator, "test_file");
- const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+ const file_path = try tmp_dir.dir.realpathAlloc(allocator, "test_file");
+ const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
try testing.expect(mem.eql(u8, file_path, expected_path));
}
@@ -476,11 +482,11 @@ test "renameAbsolute" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
try testing.expectError(error.FileNotFound, fs.renameAbsolute(
@@ -987,11 +993,11 @@ test ". and .. in absolute functions" {
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
const subdir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "./subdir" });
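All of the fs test edits above are the same mechanical rewrite; shown once in isolation (a sketch, not part of the patch):

    const std = @import("std");

    test "arena allocator migration pattern" {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();

        // Before: const allocator = &arena.allocator;  (the field no longer exists)
        // After: fetch the by-value interface once and reuse it everywhere.
        const allocator = arena.getAllocator();

        const joined = try std.fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp" });
        _ = joined; // freed wholesale by arena.deinit()
    }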
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index 528ccfc0f15c..1a033653d3d5 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -80,7 +80,7 @@ pub const PreopenList = struct {
pub const Error = error{ OutOfMemory, Overflow } || os.UnexpectedError;
/// Deinitialize with `deinit`.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{ .buffer = InnerList.init(allocator) };
}
diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig
index 56544162c8a8..c103925bdd8c 100644
--- a/lib/std/fs/watch.zig
+++ b/lib/std/fs/watch.zig
@@ -30,7 +30,7 @@ pub fn Watch(comptime V: type) type {
return struct {
channel: event.Channel(Event.Error!Event),
os_data: OsData,
- allocator: *Allocator,
+ allocator: Allocator,
const OsData = switch (builtin.os.tag) {
// TODO https://github.com/ziglang/zig/issues/3778
@@ -96,7 +96,7 @@ pub fn Watch(comptime V: type) type {
pub const Error = WatchEventError;
};
- pub fn init(allocator: *Allocator, event_buf_count: usize) !*Self {
+ pub fn init(allocator: Allocator, event_buf_count: usize) !*Self {
const self = try allocator.create(Self);
errdefer allocator.destroy(self);
@@ -648,7 +648,7 @@ test "write a file, watch it, write it again, delete it" {
return testWriteWatchWriteDelete(std.testing.allocator);
}
-fn testWriteWatchWriteDelete(allocator: *Allocator) !void {
+fn testWriteWatchWriteDelete(allocator: Allocator) !void {
const file_path = try std.fs.path.join(allocator, &[_][]const u8{ test_tmp_dir, "file.txt" });
defer allocator.free(file_path);
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index 5663bed2491e..22fd6526f475 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -309,7 +309,7 @@ test "hash struct deep" {
const Self = @This();
- pub fn init(allocator: *mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
+ pub fn init(allocator: mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
const ptr = try allocator.create(bool);
ptr.* = c_;
return Self{ .a = a_, .b = b_, .c = ptr };
diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig
index c145fcbae299..5b278ca0b11e 100644
--- a/lib/std/hash/benchmark.zig
+++ b/lib/std/hash/benchmark.zig
@@ -165,7 +165,7 @@ pub fn main() !void {
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
- const args = try std.process.argsAlloc(&fixed.allocator);
+ const args = try std.process.argsAlloc(fixed.getAllocator());
var filter: ?[]u8 = "";
var count: usize = mode(128 * MiB);
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index cd23ccd39e20..5356bbff1a2f 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -363,7 +363,7 @@ pub fn HashMap(
comptime verifyContext(Context, K, K, u64);
return struct {
unmanaged: Unmanaged,
- allocator: *Allocator,
+ allocator: Allocator,
ctx: Context,
/// The type of the unmanaged hash map underlying this wrapper
@@ -390,7 +390,7 @@ pub fn HashMap(
/// Create a managed hash map with an empty context.
/// If the context is not zero-sized, you must use
/// initContext(allocator, ctx) instead.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
if (@sizeOf(Context) != 0) {
@compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
}
@@ -402,7 +402,7 @@ pub fn HashMap(
}
/// Create a managed hash map with a context
- pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+ pub fn initContext(allocator: Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.allocator = allocator,
@@ -636,7 +636,7 @@ pub fn HashMap(
}
/// Creates a copy of this map, using a specified allocator
- pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self {
+ pub fn cloneWithAllocator(self: Self, new_allocator: Allocator) !Self {
var other = try self.unmanaged.cloneContext(new_allocator, self.ctx);
return other.promoteContext(new_allocator, self.ctx);
}
@@ -650,7 +650,7 @@ pub fn HashMap(
/// Creates a copy of this map, using a specified allocator and context.
pub fn cloneWithAllocatorAndContext(
self: Self,
- new_allocator: *Allocator,
+ new_allocator: Allocator,
new_ctx: anytype,
) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = try self.unmanaged.cloneContext(new_allocator, new_ctx);
@@ -841,13 +841,13 @@ pub fn HashMapUnmanaged(
pub const Managed = HashMap(K, V, Context, max_load_percentage);
- pub fn promote(self: Self, allocator: *Allocator) Managed {
+ pub fn promote(self: Self, allocator: Allocator) Managed {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
return promoteContext(self, allocator, undefined);
}
- pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+ pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
return .{
.unmanaged = self,
.allocator = allocator,
@@ -859,7 +859,7 @@ pub fn HashMapUnmanaged(
return size * 100 < max_load_percentage * cap;
}
- pub fn deinit(self: *Self, allocator: *Allocator) void {
+ pub fn deinit(self: *Self, allocator: Allocator) void {
self.deallocate(allocator);
self.* = undefined;
}
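The unmanaged API now threads the `Allocator` value through every allocating call, and `promote` simply stores that value next to the map. A sketch, assuming `std.AutoHashMapUnmanaged` wires up to the generic code above:

    const std = @import("std");

    test "promote an unmanaged map" {
        const gpa = std.testing.allocator;

        var unmanaged = std.AutoHashMapUnmanaged(u32, []const u8){};
        try unmanaged.put(gpa, 1, "one"); // allocator passed per call

        // promote() pairs the map with the allocator value, yielding the managed API.
        var managed = unmanaged.promote(gpa);
        defer managed.deinit(); // frees through the stored allocator
        try std.testing.expect(std.mem.eql(u8, managed.get(1).?, "one"));
    }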
@@ -872,20 +872,20 @@ pub fn HashMapUnmanaged(
pub const ensureCapacity = @compileError("deprecated; call `ensureUnusedCapacity` or `ensureTotalCapacity`");
- pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void {
+ pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_size: Size) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
return ensureTotalCapacityContext(self, allocator, new_size, undefined);
}
- pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void {
+ pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_size: Size, ctx: Context) !void {
if (new_size > self.size)
try self.growIfNeeded(allocator, new_size - self.size, ctx);
}
- pub fn ensureUnusedCapacity(self: *Self, allocator: *Allocator, additional_size: Size) !void {
+ pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_size: Size) !void {
return ensureUnusedCapacityContext(self, allocator, additional_size, undefined);
}
- pub fn ensureUnusedCapacityContext(self: *Self, allocator: *Allocator, additional_size: Size, ctx: Context) !void {
+ pub fn ensureUnusedCapacityContext(self: *Self, allocator: Allocator, additional_size: Size, ctx: Context) !void {
return ensureTotalCapacityContext(self, allocator, self.count() + additional_size, ctx);
}
@@ -897,7 +897,7 @@ pub fn HashMapUnmanaged(
}
}
- pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+ pub fn clearAndFree(self: *Self, allocator: Allocator) void {
self.deallocate(allocator);
self.size = 0;
self.available = 0;
@@ -962,12 +962,12 @@ pub fn HashMapUnmanaged(
}
/// Insert an entry in the map. Assumes it is not already present.
- pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
return self.putNoClobberContext(allocator, key, value, undefined);
}
- pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
assert(!self.containsContext(key, ctx));
try self.growIfNeeded(allocator, 1, ctx);
@@ -1021,12 +1021,12 @@ pub fn HashMapUnmanaged(
}
/// Inserts a new `Entry` into the hash map, returning the previous one, if any.
- pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+ pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
return self.fetchPutContext(allocator, key, value, undefined);
}
- pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+ pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
const gop = try self.getOrPutContext(allocator, key, ctx);
var result: ?KV = null;
if (gop.found_existing) {
@@ -1157,12 +1157,12 @@ pub fn HashMapUnmanaged(
}
/// Insert an entry if the associated key is not already present, otherwise update preexisting value.
- pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+ pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
return self.putContext(allocator, key, value, undefined);
}
- pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+ pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
const result = try self.getOrPutContext(allocator, key, ctx);
result.value_ptr.* = value;
}
@@ -1231,24 +1231,24 @@ pub fn HashMapUnmanaged(
return null;
}
- pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+ pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
return self.getOrPutContext(allocator, key, undefined);
}
- pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
if (!gop.found_existing) {
gop.key_ptr.* = key;
}
return gop;
}
- pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+ pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
}
- pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+ pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
self.growIfNeeded(allocator, 1, ctx) catch |err| {
// If allocation fails, try to do the lookup anyway.
// If we find an existing item, we can return it.
@@ -1341,12 +1341,12 @@ pub fn HashMapUnmanaged(
};
}
- pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry {
+ pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !Entry {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
return self.getOrPutValueContext(allocator, key, value, undefined);
}
- pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry {
+ pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !Entry {
const res = try self.getOrPutAdapted(allocator, key, ctx);
if (!res.found_existing) {
res.key_ptr.* = key;
@@ -1403,18 +1403,18 @@ pub fn HashMapUnmanaged(
return @truncate(Size, max_load - self.available);
}
- fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void {
+ fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) !void {
if (new_count > self.available) {
try self.grow(allocator, capacityForSize(self.load() + new_count), ctx);
}
}
- pub fn clone(self: Self, allocator: *Allocator) !Self {
+ pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(Context) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
return self.cloneContext(allocator, @as(Context, undefined));
}
- pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
+ pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){};
if (self.size == 0)
return other;
@@ -1439,7 +1439,7 @@ pub fn HashMapUnmanaged(
return other;
}
- fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void {
+ fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) !void {
@setCold(true);
const new_cap = std.math.max(new_capacity, minimal_capacity);
assert(new_cap > self.capacity());
@@ -1470,7 +1470,7 @@ pub fn HashMapUnmanaged(
std.mem.swap(Self, self, &map);
}
- fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
+ fn allocate(self: *Self, allocator: Allocator, new_capacity: Size) !void {
const header_align = @alignOf(Header);
const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
@@ -1503,7 +1503,7 @@ pub fn HashMapUnmanaged(
self.metadata = @intToPtr([*]Metadata, metadata);
}
- fn deallocate(self: *Self, allocator: *Allocator) void {
+ fn deallocate(self: *Self, allocator: Allocator) void {
if (self.metadata == null) return;
const header_align = @alignOf(Header);
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index fcea90d751ee..c265fc240861 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -97,13 +97,12 @@ const CAllocator = struct {
}
fn alloc(
- allocator: *Allocator,
+ _: *u1,
len: usize,
alignment: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
- _ = allocator;
_ = return_address;
assert(len > 0);
assert(std.math.isPowerOfTwo(alignment));
@@ -124,14 +123,13 @@ const CAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ _: *u1,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
- _ = allocator;
_ = buf_align;
_ = return_address;
if (new_len == 0) {
@@ -154,10 +152,11 @@ const CAllocator = struct {
/// Supports the full Allocator interface, including alignment, and exploiting
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
-pub const c_allocator = &c_allocator_state;
-var c_allocator_state = Allocator{
- .allocFn = CAllocator.alloc,
- .resizeFn = CAllocator.resize,
+pub const c_allocator = blk: {
+ // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+ // allowing the use of `*void`, but it would still be ugly.
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, CAllocator.alloc, CAllocator.resize);
};
/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
@@ -165,20 +164,20 @@ var c_allocator_state = Allocator{
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator` for example and is more optimal in such a case
/// than `c_allocator`.
-pub const raw_c_allocator = &raw_c_allocator_state;
-var raw_c_allocator_state = Allocator{
- .allocFn = rawCAlloc,
- .resizeFn = rawCResize,
+pub const raw_c_allocator = blk: {
+ // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+ // allowing the use of `*void`, but it would still be ugly.
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, rawCAlloc, rawCResize);
};
fn rawCAlloc(
- self: *Allocator,
+ _: *u1,
len: usize,
ptr_align: u29,
len_align: u29,
ret_addr: usize,
) Allocator.Error![]u8 {
- _ = self;
_ = len_align;
_ = ret_addr;
assert(ptr_align <= @alignOf(std.c.max_align_t));
@@ -187,14 +186,13 @@ fn rawCAlloc(
}
fn rawCResize(
- self: *Allocator,
+ _: *u1,
buf: []u8,
old_align: u29,
new_len: usize,
len_align: u29,
ret_addr: usize,
) Allocator.Error!usize {
- _ = self;
_ = old_align;
_ = ret_addr;
if (new_len == 0) {
@@ -210,19 +208,18 @@ fn rawCResize(
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (builtin.target.isWasm())
- &wasm_page_allocator_state
-else if (builtin.target.os.tag == .freestanding)
+blk: {
+ // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+ // allowing the use of `*void`, but it would still be ugly.
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, WasmPageAllocator.alloc, WasmPageAllocator.resize);
+} else if (builtin.target.os.tag == .freestanding)
root.os.heap.page_allocator
-else
- &page_allocator_state;
-
-var page_allocator_state = Allocator{
- .allocFn = PageAllocator.alloc,
- .resizeFn = PageAllocator.resize,
-};
-var wasm_page_allocator_state = Allocator{
- .allocFn = WasmPageAllocator.alloc,
- .resizeFn = WasmPageAllocator.resize,
+else blk: {
+ // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+ // allowing the use of `*void`, but it would still be ugly.
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, PageAllocator.alloc, PageAllocator.resize);
};
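These global allocators now build the interface with `Allocator.init(state_ptr, allocFn, resizeFn)`: a type-erased state pointer plus two typed callbacks (the `*u1` is a placeholder state for the stateless ones). A sketch of a custom wrapper written against that shape; `CountingAllocator` is an illustrative name, not part of the patch:

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // Hypothetical wrapper that counts allocations before forwarding them.
    const CountingAllocator = struct {
        child: Allocator,
        count: usize = 0,

        pub fn getAllocator(self: *CountingAllocator) Allocator {
            // Allocator.init ties the type-erased `ptr` to these typed callbacks.
            return Allocator.init(self, alloc, resize);
        }

        fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
            self.count += 1;
            return self.child.allocFn(self.child.ptr, len, ptr_align, len_align, ra);
        }

        fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ra: usize) Allocator.Error!usize {
            return self.child.resizeFn(self.child.ptr, buf, buf_align, new_len, len_align, ra);
        }
    };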
/// Verifies that the adjusted length will still map to the full length
@@ -236,8 +233,7 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
const PageAllocator = struct {
- fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
- _ = allocator;
+ fn alloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = ra;
assert(n > 0);
const aligned_len = mem.alignForward(n, mem.page_size);
@@ -335,14 +331,13 @@ const PageAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ _: *u1,
buf_unaligned: []u8,
buf_align: u29,
new_size: usize,
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
- _ = allocator;
_ = buf_align;
_ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
@@ -492,8 +487,7 @@ const WasmPageAllocator = struct {
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
}
- fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
- _ = allocator;
+ fn alloc(_: *u1, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = ra;
const page_count = nPages(len);
const page_idx = try allocPages(page_count, alignment);
@@ -548,14 +542,13 @@ const WasmPageAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ _: *u1,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) error{OutOfMemory}!usize {
- _ = allocator;
_ = buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
@@ -572,21 +565,20 @@ const WasmPageAllocator = struct {
pub const HeapAllocator = switch (builtin.os.tag) {
.windows => struct {
- allocator: Allocator,
heap_handle: ?HeapHandle,
const HeapHandle = os.windows.HANDLE;
pub fn init() HeapAllocator {
return HeapAllocator{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.heap_handle = null,
};
}
+ pub fn getAllocator(self: *HeapAllocator) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
pub fn deinit(self: *HeapAllocator) void {
if (self.heap_handle) |heap_handle| {
os.windows.HeapDestroy(heap_handle);
@@ -598,14 +590,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
fn alloc(
- allocator: *Allocator,
+ self: *HeapAllocator,
n: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
_ = return_address;
- const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
@@ -632,7 +623,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
fn resize(
- allocator: *Allocator,
+ self: *HeapAllocator,
buf: []u8,
buf_align: u29,
new_size: usize,
@@ -641,7 +632,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
) error{OutOfMemory}!usize {
_ = buf_align;
_ = return_address;
- const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
if (new_size == 0) {
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
return 0;
@@ -682,21 +672,27 @@ fn sliceContainsSlice(container: []u8, slice: []u8) bool {
}
pub const FixedBufferAllocator = struct {
- allocator: Allocator,
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.buffer = buffer,
.end_index = 0,
};
}
+ /// *WARNING* Using this at the same time as the interface returned by `getThreadSafeAllocator` is not thread-safe.
+ pub fn getAllocator(self: *FixedBufferAllocator) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
+ /// Provides a lock-free, thread-safe `Allocator` interface to the underlying `FixedBufferAllocator`.
+ /// *WARNING* Using this at the same time as the interface returned by `getAllocator` is not thread-safe.
+ pub fn getThreadSafeAllocator(self: *FixedBufferAllocator) Allocator {
+ return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize);
+ }
+
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
return sliceContainsPtr(self.buffer, ptr);
}
@@ -712,10 +708,9 @@ pub const FixedBufferAllocator = struct {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+ fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
- const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
return error.OutOfMemory;
const adjusted_index = self.end_index + adjust_off;
@@ -730,7 +725,7 @@ pub const FixedBufferAllocator = struct {
}
fn resize(
- allocator: *Allocator,
+ self: *FixedBufferAllocator,
buf: []u8,
buf_align: u29,
new_size: usize,
@@ -739,7 +734,6 @@ pub const FixedBufferAllocator = struct {
) Allocator.Error!usize {
_ = buf_align;
_ = return_address;
- const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(self.ownsSlice(buf)); // sanity check
if (!self.isLastAllocation(buf)) {
@@ -762,65 +756,34 @@ pub const FixedBufferAllocator = struct {
return new_size;
}
+ fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+ _ = len_align;
+ _ = ra;
+ var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
+ while (true) {
+ const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
+ return error.OutOfMemory;
+ const adjusted_index = end_index + adjust_off;
+ const new_end_index = adjusted_index + n;
+ if (new_end_index > self.buffer.len) {
+ return error.OutOfMemory;
+ }
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
+ }
+ }
+
pub fn reset(self: *FixedBufferAllocator) void {
self.end_index = 0;
}
};
-pub const ThreadSafeFixedBufferAllocator = blk: {
- if (builtin.single_threaded) {
- break :blk FixedBufferAllocator;
- } else {
- // lock free
- break :blk struct {
- allocator: Allocator,
- end_index: usize,
- buffer: []u8,
-
- pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
- return ThreadSafeFixedBufferAllocator{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = Allocator.noResize,
- },
- .buffer = buffer,
- .end_index = 0,
- };
- }
-
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
- _ = len_align;
- _ = ra;
- const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
- var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
- while (true) {
- const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
- return error.OutOfMemory;
- const adjusted_index = end_index + adjust_off;
- const new_end_index = adjusted_index + n;
- if (new_end_index > self.buffer.len) {
- return error.OutOfMemory;
- }
- end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
- }
- }
+pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `getThreadSafeAllocator` on FixedBufferAllocator");
- pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
- self.end_index = 0;
- }
- };
- }
-};
-
-pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
+pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
return StackFallbackAllocator(size){
.buffer = undefined,
.fallback_allocator = fallback_allocator,
.fixed_buffer_allocator = undefined,
- .allocator = Allocator{
- .allocFn = StackFallbackAllocator(size).alloc,
- .resizeFn = StackFallbackAllocator(size).resize,
- },
};
}
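With the dedicated type gone, the lock-free path is just a second interface over the same `FixedBufferAllocator` state. A migration sketch:

    const std = @import("std");

    test "thread-safe interface over one FixedBufferAllocator" {
        var buf: [1024]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(buf[0..]);

        // Previously: var tsfba = std.heap.ThreadSafeFixedBufferAllocator.init(buf[0..]);
        // Now the same state serves a lock-free interface on demand; just don't
        // mix it with fba.getAllocator() concurrently.
        const allocator = fba.getThreadSafeAllocator();
        const slice = try allocator.alloc(u8, 16);
        try std.testing.expect(slice.len == 16);
    }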
@@ -829,40 +792,38 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
const Self = @This();
buffer: [size]u8,
- allocator: Allocator,
- fallback_allocator: *Allocator,
+ fallback_allocator: Allocator,
fixed_buffer_allocator: FixedBufferAllocator,
- pub fn get(self: *Self) *Allocator {
+ /// WARNING: This function both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator.
+ pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
- return &self.allocator;
+ return Allocator.init(self, alloc, resize);
}
fn alloc(
- allocator: *Allocator,
+ self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
- return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, len, ptr_align, len_align, return_address) catch
- return self.fallback_allocator.allocFn(self.fallback_allocator, len, ptr_align, len_align, return_address);
+ return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
+ return self.fallback_allocator.allocFn(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
- return FixedBufferAllocator.resize(&self.fixed_buffer_allocator.allocator, buf, buf_align, new_len, len_align, return_address);
+ return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
} else {
- return self.fallback_allocator.resizeFn(self.fallback_allocator, buf, buf_align, new_len, len_align, return_address);
+ return self.fallback_allocator.resizeFn(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
}
}
};
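A usage sketch for the reworked stack-fallback allocator; per the warning above, `get()` also resets the fixed buffer, so fetch it once per lifetime:

    const std = @import("std");

    test "stackFallback spills past its buffer" {
        var sfa = std.heap.stackFallback(16, std.testing.allocator);
        const allocator = sfa.get(); // fetches the interface *and* resets the buffer

        const small = try allocator.alloc(u8, 8); // fits in the 16-byte stack buffer
        const big = try allocator.alloc(u8, 64); // falls back to std.testing.allocator
        defer allocator.free(big);
        _ = small; // stack-buffer memory needs no free
    }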
@@ -950,8 +911,8 @@ test "HeapAllocator" {
if (builtin.os.tag == .windows) {
var heap_allocator = HeapAllocator.init();
defer heap_allocator.deinit();
+ const allocator = heap_allocator.getAllocator();
- const allocator = &heap_allocator.allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
@@ -962,36 +923,39 @@ test "HeapAllocator" {
test "ArenaAllocator" {
var arena_allocator = ArenaAllocator.init(page_allocator);
defer arena_allocator.deinit();
+ const allocator = arena_allocator.getAllocator();
- try testAllocator(&arena_allocator.allocator);
- try testAllocatorAligned(&arena_allocator.allocator);
- try testAllocatorLargeAlignment(&arena_allocator.allocator);
- try testAllocatorAlignedShrink(&arena_allocator.allocator);
+ try testAllocator(allocator);
+ try testAllocatorAligned(allocator);
+ try testAllocatorLargeAlignment(allocator);
+ try testAllocatorAlignedShrink(allocator);
}
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
+ const allocator = fixed_buffer_allocator.getAllocator();
- try testAllocator(&fixed_buffer_allocator.allocator);
- try testAllocatorAligned(&fixed_buffer_allocator.allocator);
- try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
- try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+ try testAllocator(allocator);
+ try testAllocatorAligned(allocator);
+ try testAllocatorLargeAlignment(allocator);
+ try testAllocatorAlignedShrink(allocator);
}
test "FixedBufferAllocator.reset" {
var buf: [8]u8 align(@alignOf(u64)) = undefined;
var fba = FixedBufferAllocator.init(buf[0..]);
+ const allocator = fba.getAllocator();
const X = 0xeeeeeeeeeeeeeeee;
const Y = 0xffffffffffffffff;
- var x = try fba.allocator.create(u64);
+ var x = try allocator.create(u64);
x.* = X;
- try testing.expectError(error.OutOfMemory, fba.allocator.create(u64));
+ try testing.expectError(error.OutOfMemory, allocator.create(u64));
fba.reset();
- var y = try fba.allocator.create(u64);
+ var y = try allocator.create(u64);
y.* = Y;
// we expect Y to have overwritten X.
@@ -1014,23 +978,25 @@ test "FixedBufferAllocator Reuse memory on realloc" {
// check if we re-use the memory
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+ const allocator = fixed_buffer_allocator.getAllocator();
- var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
+ var slice0 = try allocator.alloc(u8, 5);
try testing.expect(slice0.len == 5);
- var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
+ var slice1 = try allocator.realloc(slice0, 10);
try testing.expect(slice1.ptr == slice0.ptr);
try testing.expect(slice1.len == 10);
- try testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
+ try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
}
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+ const allocator = fixed_buffer_allocator.getAllocator();
- var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+ var slice0 = try allocator.alloc(u8, 2);
slice0[0] = 1;
slice0[1] = 2;
- var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
- var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
+ var slice1 = try allocator.alloc(u8, 2);
+ var slice2 = try allocator.realloc(slice0, 4);
try testing.expect(slice0.ptr != slice2.ptr);
try testing.expect(slice1.ptr != slice2.ptr);
try testing.expect(slice2[0] == 1);
@@ -1038,19 +1004,19 @@ test "FixedBufferAllocator Reuse memory on realloc" {
}
}
-test "ThreadSafeFixedBufferAllocator" {
- var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+test "Thread safe FixedBufferAllocator" {
+ var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
- try testAllocator(&fixed_buffer_allocator.allocator);
- try testAllocatorAligned(&fixed_buffer_allocator.allocator);
- try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
- try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+ try testAllocator(fixed_buffer_allocator.getThreadSafeAllocator());
+ try testAllocatorAligned(fixed_buffer_allocator.getThreadSafeAllocator());
+ try testAllocatorLargeAlignment(fixed_buffer_allocator.getThreadSafeAllocator());
+ try testAllocatorAlignedShrink(fixed_buffer_allocator.getThreadSafeAllocator());
}
/// This one should not try alignments that exceed what C malloc can handle.
-pub fn testAllocator(base_allocator: *mem.Allocator) !void {
+pub fn testAllocator(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
@@ -1094,9 +1060,9 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
allocator.free(oversize);
}
-pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
// Test a few alignment values, smaller and bigger than the type's one
inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
@@ -1124,9 +1090,9 @@ pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
}
}
-pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
// Maybe a platform's page_size is actually the same as or
// very near usize?
@@ -1156,12 +1122,12 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
allocator.free(slice);
}
-pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
- const allocator = &validationAllocator.allocator;
+ const allocator = validationAllocator.getAllocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
+    var fib = FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fib.getAllocator();
const alloc_size = mem.page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
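For reference, a minimal sketch of the usage pattern the heap.zig changes above establish: the implementation struct lives in a `var`, and `getAllocator()` hands back a copyable `Allocator` value holding a type-erased pointer into it. Test name and buffer size here are illustrative, assuming the patched std:

```zig
const std = @import("std");

test "FixedBufferAllocator handle (sketch)" {
    var buffer: [1024]u8 = undefined;
    // The implementation must outlive every Allocator handle derived from
    // it, since the handle stores a type-erased pointer back into `fba`.
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = fba.getAllocator();

    const slice = try allocator.alloc(u8, 16);
    defer allocator.free(slice);
    try std.testing.expect(slice.len == 16);
}
```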
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index d61f66ce4a50..65b08399456f 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -6,9 +6,7 @@ const Allocator = std.mem.Allocator;
/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
- allocator: Allocator,
-
- child_allocator: *Allocator,
+ child_allocator: Allocator,
state: State,
/// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
@@ -17,21 +15,21 @@ pub const ArenaAllocator = struct {
buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}),
end_index: usize = 0,
- pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
+ pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator {
return .{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.child_allocator = child_allocator,
.state = self,
};
}
};
+ pub fn getAllocator(self: *ArenaAllocator) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
const BufNode = std.SinglyLinkedList([]u8).Node;
- pub fn init(child_allocator: *Allocator) ArenaAllocator {
+ pub fn init(child_allocator: Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);
}
@@ -49,7 +47,7 @@ pub const ArenaAllocator = struct {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
- const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress());
+ const buf = try self.child_allocator.allocFn(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
buf_node.* = BufNode{
.data = buf,
@@ -60,10 +58,9 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+ fn alloc(self: *ArenaAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
- const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
while (true) {
@@ -91,11 +88,10 @@ pub const ArenaAllocator = struct {
}
}
- fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
+ fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
_ = buf_align;
_ = len_align;
_ = ret_addr;
- const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
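A short sketch of the arena after this change: `init` now takes the child `Allocator` by value, and callers fetch the arena's own handle via `getAllocator()` (assuming the patched std; values are illustrative):

```zig
const std = @import("std");

test "ArenaAllocator under the value interface (sketch)" {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit(); // frees every allocation below at once
    const allocator = arena.getAllocator();

    const nums = try allocator.alloc(u32, 8);
    nums[0] = 1;
    const single = try allocator.create(u64);
    single.* = 2;
}
```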
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index c3c28a53b6c9..d4f1dde29979 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -172,11 +172,7 @@ pub const Config = struct {
pub fn GeneralPurposeAllocator(comptime config: Config) type {
return struct {
- allocator: Allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
- backing_allocator: *Allocator = std.heap.page_allocator,
+ backing_allocator: Allocator = std.heap.page_allocator,
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
large_allocations: LargeAllocTable = .{},
empty_buckets: if (config.retain_metadata) ?*BucketHeader else void =
@@ -284,6 +280,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
};
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
fn bucketStackTrace(
bucket: *BucketHeader,
size_class: usize,
@@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var it = self.large_allocations.iterator();
while (it.next()) |large| {
if (large.value_ptr.freed) {
- _ = self.backing_allocator.resizeFn(self.backing_allocator, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
+ _ = self.backing_allocator.resizeFn(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
}
}
}
@@ -571,7 +571,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const result_len = if (config.never_unmap and new_size == 0)
0
else
- try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
+ try self.backing_allocator.resizeFn(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
@@ -606,15 +606,13 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
old_mem: []u8,
old_align: u29,
new_size: usize,
len_align: u29,
ret_addr: usize,
) Error!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-
self.mutex.lock();
defer self.mutex.unlock();
@@ -755,9 +753,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return true;
}
- fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-
+    fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
self.mutex.lock();
defer self.mutex.unlock();
@@ -768,7 +764,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
- const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
+ const slice = try self.backing_allocator.allocFn(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
if (config.retain_metadata and !config.never_unmap) {
@@ -834,7 +830,7 @@ const test_config = Config{};
test "small allocations - free in same order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -853,7 +849,7 @@ test "small allocations - free in same order" {
test "small allocations - free in reverse order" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var list = std.ArrayList(*u64).init(std.testing.allocator);
defer list.deinit();
@@ -872,7 +868,7 @@ test "small allocations - free in reverse order" {
test "large allocations" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const ptr1 = try allocator.alloc(u64, 42768);
const ptr2 = try allocator.alloc(u64, 52768);
@@ -885,7 +881,7 @@ test "large allocations" {
test "realloc" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
defer allocator.free(slice);
@@ -907,7 +903,7 @@ test "realloc" {
test "shrink" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
@@ -930,7 +926,7 @@ test "shrink" {
test "large object - grow" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
@@ -948,7 +944,7 @@ test "large object - grow" {
test "realloc small object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, 70);
defer allocator.free(slice);
@@ -965,7 +961,7 @@ test "realloc small object to large object" {
test "shrink large object to large object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -988,10 +984,10 @@ test "shrink large object to large object" {
test "shrink large object to large object with larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fba.getAllocator();
const alloc_size = page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
@@ -1023,7 +1019,7 @@ test "shrink large object to large object with larger alignment" {
test "realloc large object to small object" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1041,7 +1037,7 @@ test "overrideable mutexes" {
.mutex = std.Thread.Mutex{},
};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@@ -1050,7 +1046,7 @@ test "overrideable mutexes" {
test "non-page-allocator backing allocator" {
var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
@@ -1059,10 +1055,10 @@ test "non-page-allocator backing allocator" {
test "realloc large object to larger alignment" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fba.getAllocator();
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1098,9 +1094,9 @@ test "realloc large object to larger alignment" {
test "large object shrinks to small but allocation fails during shrink" {
var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
- var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
+ var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.getAllocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
@@ -1117,7 +1113,7 @@ test "large object shrinks to small but allocation fails during shrink" {
test "objects of size 1024 and 2048" {
var gpa = GeneralPurposeAllocator(test_config){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
const slice = try allocator.alloc(u8, 1025);
const slice2 = try allocator.alloc(u8, 3000);
@@ -1129,7 +1125,7 @@ test "objects of size 1024 and 2048" {
test "setting a memory cap" {
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
gpa.setRequestedMemoryLimit(1010);
@@ -1158,9 +1154,9 @@ test "double frees" {
defer std.testing.expect(!backing_gpa.deinit()) catch @panic("leak");
const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
- var gpa = GPA{ .backing_allocator = &backing_gpa.allocator };
+ var gpa = GPA{ .backing_allocator = backing_gpa.getAllocator() };
defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
- const allocator = &gpa.allocator;
+ const allocator = gpa.getAllocator();
// detect a small allocation double free, even though bucket is emptied
const index: usize = 6;
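The GPA tests above all follow the same pattern; a condensed sketch, assuming the patched std (the stored value and test name are illustrative):

```zig
const std = @import("std");

test "GeneralPurposeAllocator handle (sketch)" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
    // The handle is a small value: one type-erased pointer plus two
    // function pointers, so copying it is cheap.
    const allocator = gpa.getAllocator();

    const ptr = try allocator.create(i32);
    defer allocator.destroy(ptr);
    ptr.* = 42;
}
```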
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
index cf9c4162a742..1f3146f79fb3 100644
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ b/lib/std/heap/log_to_writer_allocator.zig
@@ -5,33 +5,31 @@ const Allocator = std.mem.Allocator;
/// on every call to the allocator. Writer errors are ignored.
pub fn LogToWriterAllocator(comptime Writer: type) type {
return struct {
- allocator: Allocator,
- parent_allocator: *Allocator,
+ parent_allocator: Allocator,
writer: Writer,
const Self = @This();
- pub fn init(parent_allocator: *Allocator, writer: Writer) Self {
+ pub fn init(parent_allocator: Allocator, writer: Writer) Self {
return Self{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.parent_allocator = parent_allocator,
.writer = writer,
};
}
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
fn alloc(
- allocator: *Allocator,
+ self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
ra: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
self.writer.print("alloc : {}", .{len}) catch {};
- const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
+ const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
if (result) |_| {
self.writer.print(" success!\n", .{}) catch {};
} else |_| {
@@ -41,14 +39,13 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
if (new_len == 0) {
self.writer.print("free : {}\n", .{buf.len}) catch {};
} else if (new_len <= buf.len) {
@@ -56,7 +53,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
} else {
self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
- if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+ if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len > buf.len) {
self.writer.print(" success!\n", .{}) catch {};
}
@@ -73,7 +70,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
/// This allocator is used in front of another allocator and logs to the provided writer
/// on every call to the allocator. Writer errors are ignored.
pub fn logToWriterAllocator(
- parent_allocator: *Allocator,
+ parent_allocator: Allocator,
writer: anytype,
) LogToWriterAllocator(@TypeOf(writer)) {
return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer);
@@ -85,7 +82,7 @@ test "LogToWriterAllocator" {
var allocator_buf: [10]u8 = undefined;
var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
- const allocator = &logToWriterAllocator(&fixedBufferAllocator.allocator, fbs.writer()).allocator;
+    var log_to_writer_allocator = logToWriterAllocator(fixedBufferAllocator.getAllocator(), fbs.writer());
+    const allocator = log_to_writer_allocator.getAllocator();
var a = try allocator.alloc(u8, 10);
a = allocator.shrink(a, 5);
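The corrected test above stores the wrapper in a variable before taking its handle, since `getAllocator()` returns a pointer-carrying value; the same pattern applies outside tests. A sketch, assuming the patched std:

```zig
const std = @import("std");

test "logToWriterAllocator wrapping (sketch)" {
    var log_buf: [255]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&log_buf);

    var buffer: [64]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    // Keep the wrapper in a named variable; the handle returned by
    // getAllocator() points at this state.
    var logging = std.heap.logToWriterAllocator(fba.getAllocator(), fbs.writer());
    const allocator = logging.getAllocator();

    const bytes = try allocator.alloc(u8, 8);
    allocator.free(bytes);
}
```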
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 0c6224b7ce15..34dc554dee5f 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -22,21 +22,20 @@ pub fn ScopedLoggingAllocator(
const log = std.log.scoped(scope);
return struct {
- allocator: Allocator,
- parent_allocator: *Allocator,
+ parent_allocator: Allocator,
const Self = @This();
- pub fn init(parent_allocator: *Allocator) Self {
+ pub fn init(parent_allocator: Allocator) Self {
return .{
- .allocator = Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.parent_allocator = parent_allocator,
};
}
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
+ }
+
// This function is required as the `std.log.log` function is not public
inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void {
switch (log_level) {
@@ -48,13 +47,12 @@ pub fn ScopedLoggingAllocator(
}
fn alloc(
- allocator: *Allocator,
+ self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
ra: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-        const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
+        const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
if (result) |_| {
logHelper(
@@ -73,15 +71,13 @@ pub fn ScopedLoggingAllocator(
}
fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(Self, "allocator", allocator);
-
-        if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+        if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (new_len == 0) {
logHelper(success_log_level, "free - success - len: {}", .{buf.len});
@@ -116,6 +112,6 @@ pub fn ScopedLoggingAllocator(
/// This allocator is used in front of another allocator and logs to `std.log`
/// on every call to the allocator.
/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
-pub fn loggingAllocator(parent_allocator: *Allocator) LoggingAllocator(.debug, .err) {
+pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) {
return LoggingAllocator(.debug, .err).init(parent_allocator);
}
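`loggingAllocator` composes the same way as the writer-based variant; a brief sketch of wrapping a GPA (assuming the patched std):

```zig
const std = @import("std");

test "loggingAllocator over a GPA (sketch)" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var logging = std.heap.loggingAllocator(gpa.getAllocator());
    const allocator = logging.getAllocator();

    const p = try allocator.create(u32);
    allocator.destroy(p);
}
```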
diff --git a/lib/std/io/buffered_atomic_file.zig b/lib/std/io/buffered_atomic_file.zig
index 5b27ba78f1a4..71edabb20a5d 100644
--- a/lib/std/io/buffered_atomic_file.zig
+++ b/lib/std/io/buffered_atomic_file.zig
@@ -7,7 +7,7 @@ pub const BufferedAtomicFile = struct {
atomic_file: fs.AtomicFile,
file_writer: File.Writer,
buffered_writer: BufferedWriter,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
pub const buffer_size = 4096;
pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer);
@@ -16,7 +16,7 @@ pub const BufferedAtomicFile = struct {
/// TODO when https://github.com/ziglang/zig/issues/2761 is solved
/// this API will not need an allocator
pub fn create(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
dir: fs.Dir,
dest_path: []const u8,
atomic_file_options: fs.Dir.AtomicFileOptions,
diff --git a/lib/std/io/peek_stream.zig b/lib/std/io/peek_stream.zig
index c77052f97591..8779e22250eb 100644
--- a/lib/std/io/peek_stream.zig
+++ b/lib/std/io/peek_stream.zig
@@ -38,7 +38,7 @@ pub fn PeekStream(
}
},
.Dynamic => struct {
- pub fn init(base: ReaderType, allocator: *mem.Allocator) Self {
+ pub fn init(base: ReaderType, allocator: mem.Allocator) Self {
return .{
.unbuffered_reader = base,
.fifo = FifoType.init(allocator),
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 3da053e4fb0b..28395526685f 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -88,7 +88,7 @@ pub fn Reader(
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readAllAlloc(self: Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
+ pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) ![]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
try self.readAllArrayList(&array_list, max_size);
@@ -127,7 +127,7 @@ pub fn Reader(
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(
self: Self,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) ![]u8 {
@@ -163,7 +163,7 @@ pub fn Reader(
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterOrEofAlloc(
self: Self,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) !?[]u8 {
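These reader helpers simply take the allocator by value now. A hypothetical caller (the function name, path parameter, and size cap are invented for this sketch, not part of the patch):

```zig
const std = @import("std");

// Hypothetical helper: reads an entire file into caller-owned memory.
fn readWholeFile(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
    const file = try std.fs.cwd().openFile(path, .{});
    defer file.close();
    // readAllAlloc now takes `mem.Allocator` by value.
    return file.reader().readAllAlloc(allocator, 1024 * 1024);
}
```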
diff --git a/lib/std/json.zig b/lib/std/json.zig
index ff37bc416264..978213a5961c 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1476,7 +1476,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
}
pub const ParseOptions = struct {
- allocator: ?*Allocator = null,
+ allocator: ?Allocator = null,
/// Behaviour when a duplicate field is encountered.
duplicate_field_behavior: enum {
@@ -2033,7 +2033,7 @@ test "parse into tagged union" {
{ // failing allocations should be bubbled up instantly without trying next member
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
- const options = ParseOptions{ .allocator = &fail_alloc.allocator };
+ const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
const T = union(enum) {
// both fields here match the input
string: []const u8,
@@ -2081,7 +2081,7 @@ test "parse union bubbles up AllocatorRequired" {
test "parseFree descends into tagged union" {
var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
- const options = ParseOptions{ .allocator = &fail_alloc.allocator };
+ const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
const T = union(enum) {
int: i32,
float: f64,
@@ -2328,7 +2328,7 @@ test "parse into double recursive union definition" {
/// A non-stream JSON parser which constructs a tree of Value's.
pub const Parser = struct {
- allocator: *Allocator,
+ allocator: Allocator,
state: State,
copy_strings: bool,
// Stores parent nodes and un-combined Values.
@@ -2341,7 +2341,7 @@ pub const Parser = struct {
Simple,
};
- pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
+ pub fn init(allocator: Allocator, copy_strings: bool) Parser {
return Parser{
.allocator = allocator,
.state = .Simple,
@@ -2364,9 +2364,10 @@ pub const Parser = struct {
var arena = ArenaAllocator.init(p.allocator);
errdefer arena.deinit();
+ const allocator = arena.getAllocator();
while (try s.next()) |token| {
- try p.transition(&arena.allocator, input, s.i - 1, token);
+ try p.transition(allocator, input, s.i - 1, token);
}
debug.assert(p.stack.items.len == 1);
@@ -2379,7 +2380,7 @@ pub const Parser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
    // can be cleaned up correctly on error during a `parse` call.
- fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void {
+ fn transition(p: *Parser, allocator: Allocator, input: []const u8, i: usize, token: Token) !void {
switch (p.state) {
.ObjectKey => switch (token) {
.ObjectEnd => {
@@ -2536,7 +2537,7 @@ pub const Parser = struct {
}
}
- fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
+ fn parseString(p: *Parser, allocator: Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
const slice = s.slice(input, i);
switch (s.escapes) {
.None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
@@ -2737,7 +2738,7 @@ test "write json then parse it" {
try testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello"));
}
-fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
+fn testParse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value {
var p = Parser.init(arena_allocator, false);
return (try p.parse(json_str)).root;
}
@@ -2745,13 +2746,13 @@ fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
test "parsing empty string gives appropriate error" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
- try testing.expectError(error.UnexpectedEndOfJson, testParse(&arena_allocator.allocator, ""));
+ try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.getAllocator(), ""));
}
test "integer after float has proper type" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
- const json = try testParse(&arena_allocator.allocator,
+ const json = try testParse(arena_allocator.getAllocator(),
\\{
\\ "float": 3.14,
\\ "ints": [1, 2, 3]
@@ -2786,7 +2787,7 @@ test "escaped characters" {
\\}
;
- const obj = (try testParse(&arena_allocator.allocator, input)).Object;
+ const obj = (try testParse(arena_allocator.getAllocator(), input)).Object;
try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
@@ -2812,11 +2813,12 @@ test "string copy option" {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_allocator.deinit();
+ const allocator = arena_allocator.getAllocator();
- const tree_nocopy = try Parser.init(&arena_allocator.allocator, false).parse(input);
+ const tree_nocopy = try Parser.init(allocator, false).parse(input);
const obj_nocopy = tree_nocopy.root.Object;
- const tree_copy = try Parser.init(&arena_allocator.allocator, true).parse(input);
+ const tree_copy = try Parser.init(allocator, true).parse(input);
const obj_copy = tree_copy.root.Object;
for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| {
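A condensed sketch of the json.Parser pattern the updated tests use: one arena, one handle, parse from it (assuming the patched std; the input document is illustrative):

```zig
const std = @import("std");

test "json.Parser with an arena handle (sketch)" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();

    var parser = std.json.Parser.init(arena.getAllocator(), false);
    const tree = try parser.parse(
        \\{"hello": "world"}
    );
    const value = tree.root.Object.get("hello").?;
    try std.testing.expect(std.mem.eql(u8, value.String, "world"));
}
```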
diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig
index 61da6ec49be0..2ef6fa3a86c2 100644
--- a/lib/std/json/write_stream.zig
+++ b/lib/std/json/write_stream.zig
@@ -243,7 +243,7 @@ test "json write stream" {
try w.beginObject();
try w.objectField("object");
- try w.emitJson(try getJsonObject(&arena_allocator.allocator));
+ try w.emitJson(try getJsonObject(arena_allocator.getAllocator()));
try w.objectField("string");
try w.emitString("This is a string");
@@ -286,7 +286,7 @@ test "json write stream" {
try std.testing.expect(std.mem.eql(u8, expected, result));
}
-fn getJsonObject(allocator: *std.mem.Allocator) !std.json.Value {
+fn getJsonObject(allocator: std.mem.Allocator) !std.json.Value {
var value = std.json.Value{ .Object = std.json.ObjectMap.init(allocator) };
try value.Object.put("one", std.json.Value{ .Integer = @intCast(i64, 1) });
try value.Object.put("two", std.json.Value{ .Float = 2.0 });
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index a8ad58be02ca..d7bcf9badcd5 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -142,7 +142,7 @@ pub const Mutable = struct {
/// Asserts that the allocator owns the limbs memory. If this is not the case,
/// use `toConst().toManaged()`.
- pub fn toManaged(self: Mutable, allocator: *Allocator) Managed {
+ pub fn toManaged(self: Mutable, allocator: Allocator) Managed {
return .{
.allocator = allocator,
.limbs = self.limbs,
@@ -283,7 +283,7 @@ pub const Mutable = struct {
base: u8,
value: []const u8,
limbs_buffer: []Limb,
- allocator: ?*Allocator,
+ allocator: ?Allocator,
) error{InvalidCharacter}!void {
assert(base >= 2 and base <= 16);
@@ -608,7 +608,7 @@ pub const Mutable = struct {
/// rma is given by `a.limbs.len + b.limbs.len`.
///
/// `limbs_buffer` is used for temporary storage. The amount required is given by `calcMulLimbsBufferLen`.
- pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void {
+ pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?Allocator) void {
var buf_index: usize = 0;
const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: {
@@ -638,7 +638,7 @@ pub const Mutable = struct {
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
- pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?*Allocator) void {
+ pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?Allocator) void {
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
@@ -674,7 +674,7 @@ pub const Mutable = struct {
signedness: Signedness,
bit_count: usize,
limbs_buffer: []Limb,
- allocator: ?*Allocator,
+ allocator: ?Allocator,
) void {
var buf_index: usize = 0;
const req_limbs = calcTwosCompLimbCount(bit_count);
@@ -714,7 +714,7 @@ pub const Mutable = struct {
b: Const,
signedness: Signedness,
bit_count: usize,
- allocator: ?*Allocator,
+ allocator: ?Allocator,
) void {
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
@@ -763,7 +763,7 @@ pub const Mutable = struct {
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
- pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
+ pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?Allocator) void {
_ = opt_allocator;
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
@@ -1660,7 +1660,7 @@ pub const Const = struct {
positive: bool,
/// The result is an independent resource which is managed by the caller.
- pub fn toManaged(self: Const, allocator: *Allocator) Allocator.Error!Managed {
+ pub fn toManaged(self: Const, allocator: Allocator) Allocator.Error!Managed {
const limbs = try allocator.alloc(Limb, math.max(Managed.default_capacity, self.limbs.len));
mem.copy(Limb, limbs, self.limbs);
return Managed{
@@ -1873,7 +1873,7 @@ pub const Const = struct {
/// Caller owns returned memory.
/// Asserts that `base` is in the range [2, 16].
/// See also `toString`, a lower level function than this.
- pub fn toStringAlloc(self: Const, allocator: *Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
+ pub fn toStringAlloc(self: Const, allocator: Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
assert(base >= 2);
assert(base <= 16);
@@ -2092,7 +2092,7 @@ pub const Managed = struct {
pub const default_capacity = 4;
/// Allocator used by the Managed when requesting memory.
- allocator: *Allocator,
+ allocator: Allocator,
/// Raw digits. These are:
///
@@ -2109,7 +2109,7 @@ pub const Managed = struct {
/// Creates a new `Managed`. `default_capacity` limbs will be allocated immediately.
/// The integer value after initializing is `0`.
- pub fn init(allocator: *Allocator) !Managed {
+ pub fn init(allocator: Allocator) !Managed {
return initCapacity(allocator, default_capacity);
}
@@ -2131,7 +2131,7 @@ pub const Managed = struct {
/// Creates a new `Managed` with value `value`.
///
/// This is identical to an `init`, followed by a `set`.
- pub fn initSet(allocator: *Allocator, value: anytype) !Managed {
+ pub fn initSet(allocator: Allocator, value: anytype) !Managed {
var s = try Managed.init(allocator);
try s.set(value);
return s;
@@ -2140,7 +2140,7 @@ pub const Managed = struct {
/// Creates a new Managed with a specific capacity. If capacity < default_capacity then the
/// default capacity will be used instead.
/// The integer value after initializing is `0`.
- pub fn initCapacity(allocator: *Allocator, capacity: usize) !Managed {
+ pub fn initCapacity(allocator: Allocator, capacity: usize) !Managed {
return Managed{
.allocator = allocator,
.metadata = 1,
@@ -2206,7 +2206,7 @@ pub const Managed = struct {
return other.cloneWithDifferentAllocator(other.allocator);
}
- pub fn cloneWithDifferentAllocator(other: Managed, allocator: *Allocator) !Managed {
+ pub fn cloneWithDifferentAllocator(other: Managed, allocator: Allocator) !Managed {
return Managed{
.allocator = allocator,
.metadata = other.metadata,
@@ -2347,7 +2347,7 @@ pub const Managed = struct {
/// Converts self to a string in the requested base. Memory is allocated from the provided
/// allocator and not the one present in self.
- pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 {
+ pub fn toString(self: Managed, allocator: Allocator, base: u8, case: std.fmt.Case) ![]u8 {
_ = allocator;
if (base < 2 or base > 16) return error.InvalidBase;
return self.toConst().toStringAlloc(self.allocator, base, case);
@@ -2784,7 +2784,7 @@ const AccOp = enum {
/// r MUST NOT alias any of a or b.
///
/// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
-fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
+fn llmulacc(comptime op: AccOp, opt_allocator: ?Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
@setRuntimeSafety(debug_safety);
assert(r.len >= a.len);
assert(r.len >= b.len);
@@ -2819,7 +2819,7 @@ fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []cons
/// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
fn llmulaccKaratsuba(
comptime op: AccOp,
- allocator: *Allocator,
+ allocator: Allocator,
r: []Limb,
a: []const Limb,
b: []const Limb,
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index 1f66417496ca..de6804ca017e 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -29,7 +29,7 @@ pub const Rational = struct {
/// Create a new Rational. A small amount of memory will be allocated on initialization.
/// This will be 2 * Int.default_capacity.
- pub fn init(a: *Allocator) !Rational {
+ pub fn init(a: Allocator) !Rational {
return Rational{
.p = try Int.init(a),
.q = try Int.initSet(a, 1),
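For the big-integer types the change is mechanical: every allocator parameter becomes a value. A usage sketch (assuming the patched std; the number and base are illustrative):

```zig
const std = @import("std");

test "big.int.Managed with a value allocator (sketch)" {
    var a = try std.math.big.int.Managed.initSet(std.testing.allocator, 123456789);
    defer a.deinit();

    const s = try a.toString(std.testing.allocator, 10, .lower);
    defer std.testing.allocator.free(s);
    try std.testing.expectEqualStrings("123456789", s);
}
```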
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 0390733b3d2d..b5dc50191178 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -37,24 +37,26 @@ pub const Allocator = @import("mem/Allocator.zig");
pub fn ValidationAllocator(comptime T: type) type {
return struct {
const Self = @This();
- allocator: Allocator,
+
underlying_allocator: T,
+
pub fn init(allocator: T) @This() {
return .{
- .allocator = .{
- .allocFn = alloc,
- .resizeFn = resize,
- },
.underlying_allocator = allocator,
};
}
- fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
- if (T == *Allocator) return self.underlying_allocator;
- if (*T == *Allocator) return &self.underlying_allocator;
- return &self.underlying_allocator.allocator;
+
+ pub fn getAllocator(self: *Self) Allocator {
+ return Allocator.init(self, alloc, resize);
}
+
+ fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
+ if (T == Allocator) return self.underlying_allocator;
+ return self.underlying_allocator.getAllocator();
+ }
+
pub fn alloc(
- allocator: *Allocator,
+ self: *Self,
n: usize,
ptr_align: u29,
len_align: u29,
@@ -67,9 +69,8 @@ pub fn ValidationAllocator(comptime T: type) type {
assert(n >= len_align);
}
- const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr);
+ const result = try underlying.allocFn(underlying.ptr, n, ptr_align, len_align, ret_addr);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
@@ -79,8 +80,9 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
+
pub fn resize(
- allocator: *Allocator,
+ self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
@@ -92,9 +94,8 @@ pub fn ValidationAllocator(comptime T: type) type {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
- const self = @fieldParentPtr(@This(), "allocator", allocator);
const underlying = self.getUnderlyingAllocatorPtr();
- const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr);
+ const result = try underlying.resizeFn(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
if (len_align == 0) {
assert(result == new_len);
} else {
@@ -103,7 +104,7 @@ pub fn ValidationAllocator(comptime T: type) type {
}
return result;
}
- pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct {
+ pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct {
pub fn reset(self: *Self) void {
self.underlying_allocator.reset();
}
@@ -130,12 +131,14 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
return adjusted;
}
-var failAllocator = Allocator{
- .allocFn = failAllocatorAlloc,
- .resizeFn = Allocator.noResize,
+const failAllocator = blk: {
+    // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706
+    // is implemented, allowing the use of `*void`, though it would still be ugly.
+ var tmp: u1 = 0;
+ break :blk Allocator.init(&tmp, failAllocatorAlloc, Allocator.NoResize(u1).noResize);
};
-fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
- _ = self;
+
+fn failAllocatorAlloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
_ = n;
_ = alignment;
_ = len_align;
@@ -1786,18 +1789,18 @@ pub fn SplitIterator(comptime T: type) type {
/// Naively combines a series of slices with a separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
+pub fn join(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
return joinMaybeZ(allocator, separator, slices, false);
}
/// Naively combines a series of slices with a separator and null terminator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn joinZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
+pub fn joinZ(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
const out = try joinMaybeZ(allocator, separator, slices, true);
return out[0 .. out.len - 1 :0];
}
-fn joinMaybeZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
+fn joinMaybeZ(allocator: Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
if (slices.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
const total_len = blk: {
@@ -1876,7 +1879,7 @@ test "mem.joinZ" {
}
/// Copies each T from slices into a new slice that exactly holds all the elements.
-pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T {
+pub fn concat(allocator: Allocator, comptime T: type, slices: []const []const T) ![]T {
if (slices.len == 0) return &[0]T{};
const total_len = blk: {
@@ -2318,7 +2321,7 @@ test "replacementSize" {
}
/// Perform a replacement on an allocated buffer of pre-determined size. Caller must free returned memory.
-pub fn replaceOwned(comptime T: type, allocator: *Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
+pub fn replaceOwned(comptime T: type, allocator: Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
var output = try allocator.alloc(T, replacementSize(T, input, needle, replacement));
_ = replace(T, input, needle, replacement, output);
return output;
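The mem.zig helpers above keep their semantics; only the allocator parameter type changes. For example (assuming the patched std):

```zig
const std = @import("std");

test "mem.join with the value signature (sketch)" {
    const allocator = std.testing.allocator;
    const joined = try std.mem.join(allocator, ", ", &[_][]const u8{ "a", "b", "c" });
    defer allocator.free(joined);
    try std.testing.expectEqualStrings("a, b, c", joined);
}
```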
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index a3c0995496f7..8478120b003a 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -8,6 +8,9 @@ const Allocator = @This();
pub const Error = error{OutOfMemory};
+/// The type-erased pointer to the allocator implementation.
+ptr: *c_void,
+
/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
@@ -17,7 +20,7 @@ pub const Error = error{OutOfMemory};
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
-allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
+allocFn: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
@@ -39,24 +42,56 @@ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_a
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
-resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+resizeFn: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+
+pub fn init(
+ pointer: anytype,
+ comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
+ comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+) Allocator {
+ const Ptr = @TypeOf(pointer);
+ assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
+ assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
+ const gen = struct {
+ fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
+ const alignment = @typeInfo(Ptr).Pointer.alignment;
+ const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+ return allocFn(self, len, ptr_align, len_align, ret_addr);
+ }
+ fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+ const alignment = @typeInfo(Ptr).Pointer.alignment;
+ const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+ return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr);
+ }
+ };
-/// Set to resizeFn if in-place resize is not supported.
-pub fn noResize(
- self: *Allocator,
- buf: []u8,
- buf_align: u29,
- new_len: usize,
- len_align: u29,
- ret_addr: usize,
-) Error!usize {
- _ = self;
- _ = buf_align;
- _ = len_align;
- _ = ret_addr;
- if (new_len > buf.len)
- return error.OutOfMemory;
- return new_len;
+ return .{
+ .ptr = pointer,
+ .allocFn = gen.alloc,
+ .resizeFn = gen.resize,
+ };
+}
+
+/// Set resizeFn to `NoResize(AllocatorType).noResize` if in-place resize is not supported.
+pub fn NoResize(comptime AllocatorType: type) type {
+ return struct {
+ pub fn noResize(
+ self: *AllocatorType,
+ buf: []u8,
+ buf_align: u29,
+ new_len: usize,
+ len_align: u29,
+ ret_addr: usize,
+ ) Error!usize {
+ _ = self;
+ _ = buf_align;
+ _ = len_align;
+ _ = ret_addr;
+ if (new_len > buf.len)
+ return error.OutOfMemory;
+ return new_len;
+ }
+ };
}
/// Realloc is used to modify the size or alignment of an existing allocation,
@@ -80,8 +115,8 @@ pub fn noResize(
/// as `old_mem` was when `reallocFn` is called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
-pub fn reallocBytes(
- self: *Allocator,
+fn reallocBytes(
+ self: Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
/// `allocFn` or `resizeFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
@@ -106,7 +141,7 @@ pub fn reallocBytes(
return_address: usize,
) Error![]u8 {
if (old_mem.len == 0) {
- const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address);
+ const new_mem = try self.allocFn(self.ptr, new_byte_count, new_alignment, len_align, return_address);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(new_mem.ptr, undefined, new_byte_count);
return new_mem;
@@ -117,7 +152,7 @@ pub fn reallocBytes(
const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
return old_mem.ptr[0..shrunk_len];
}
- if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
+ if (self.resizeFn(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
assert(resized_len >= new_byte_count);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
@@ -133,7 +168,7 @@ pub fn reallocBytes(
/// Move the given memory to a new location in the given allocator to accommodate a new
/// size and alignment.
fn moveBytes(
- self: *Allocator,
+ self: Allocator,
old_mem: []u8,
old_align: u29,
new_len: usize,
@@ -143,7 +178,7 @@ fn moveBytes(
) Error![]u8 {
assert(old_mem.len > 0);
assert(new_len > 0);
- const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address);
+ const new_mem = try self.allocFn(self.ptr, new_len, new_alignment, len_align, return_address);
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
// TODO https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr, undefined, old_mem.len);
@@ -153,7 +188,7 @@ fn moveBytes(
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
-pub fn create(self: *Allocator, comptime T: type) Error!*T {
+pub fn create(self: Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return @as(*T, undefined);
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
return &slice[0];
@@ -161,7 +196,7 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
-pub fn destroy(self: *Allocator, ptr: anytype) void {
+pub fn destroy(self: Allocator, ptr: anytype) void {
const info = @typeInfo(@TypeOf(ptr)).Pointer;
const T = info.child;
if (@sizeOf(T) == 0) return;
@@ -177,12 +212,12 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
/// call `free` when done.
///
/// For allocating a single item, see `create`.
-pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
+pub fn alloc(self: Allocator, comptime T: type, n: usize) Error![]T {
return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
}
pub fn allocWithOptions(
- self: *Allocator,
+ self: Allocator,
comptime Elem: type,
n: usize,
/// null means naturally aligned
@@ -193,7 +228,7 @@ pub fn allocWithOptions(
}
pub fn allocWithOptionsRetAddr(
- self: *Allocator,
+ self: Allocator,
comptime Elem: type,
n: usize,
/// null means naturally aligned
@@ -227,7 +262,7 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti
///
/// For allocating a single item, see `create`.
pub fn allocSentinel(
- self: *Allocator,
+ self: Allocator,
comptime Elem: type,
n: usize,
comptime sentinel: Elem,
@@ -236,7 +271,7 @@ pub fn allocSentinel(
}
pub fn alignedAlloc(
- self: *Allocator,
+ self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
@@ -246,7 +281,7 @@ pub fn alignedAlloc(
}
pub fn allocAdvanced(
- self: *Allocator,
+ self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
@@ -259,7 +294,7 @@ pub fn allocAdvanced(
pub const Exact = enum { exact, at_least };
pub fn allocAdvancedWithRetAddr(
- self: *Allocator,
+ self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
@@ -285,7 +320,7 @@ pub fn allocAdvancedWithRetAddr(
.exact => 0,
.at_least => size_of_T,
};
- const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address);
+ const byte_slice = try self.allocFn(self.ptr, byte_count, a, len_align, return_address);
switch (exact) {
.exact => assert(byte_slice.len == byte_count),
.at_least => assert(byte_slice.len >= byte_count),
@@ -301,7 +336,7 @@ pub fn allocAdvancedWithRetAddr(
}
/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
-pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
+pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == 0) {
@@ -310,7 +345,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
- const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
+ const rc = try self.resizeFn(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
assert(rc == new_byte_count);
const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
@@ -326,7 +361,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
/// in `std.ArrayList.shrink`.
/// If you need guaranteed success, call `shrink`.
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
-pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
@@ -334,7 +369,7 @@ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
}
-pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn reallocAtLeast(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
@@ -346,7 +381,7 @@ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
pub fn reallocAdvanced(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -356,7 +391,7 @@ pub fn reallocAdvanced(
}
pub fn reallocAdvancedWithRetAddr(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -389,7 +424,7 @@ pub fn reallocAdvancedWithRetAddr(
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
/// Returned slice has same alignment as old_mem.
/// Shrinking to 0 is the same as calling `free`.
-pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn shrink(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t []align(Slice.alignment) Slice.child;
} {
@@ -401,7 +436,7 @@ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
/// a new alignment, which must be smaller or the same as the old
/// allocation.
pub fn alignedShrink(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -413,7 +448,7 @@ pub fn alignedShrink(
/// the return address of the first stack frame, which may be relevant for
/// allocators which collect stack traces.
pub fn alignedShrinkWithRetAddr(
- self: *Allocator,
+ self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
@@ -440,7 +475,7 @@ pub fn alignedShrinkWithRetAddr(
/// Free an array allocated with `alloc`. To free a single item,
/// see `destroy`.
-pub fn free(self: *Allocator, memory: anytype) void {
+pub fn free(self: Allocator, memory: anytype) void {
const Slice = @typeInfo(@TypeOf(memory)).Pointer;
const bytes = mem.sliceAsBytes(memory);
const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
@@ -452,14 +487,14 @@ pub fn free(self: *Allocator, memory: anytype) void {
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
-pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
+pub fn dupe(allocator: Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
mem.copy(T, new_buf, m);
return new_buf;
}
/// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
-pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
+pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
const new_buf = try allocator.alloc(T, m.len + 1);
mem.copy(T, new_buf, m);
new_buf[m.len] = 0;
@@ -471,7 +506,7 @@ pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
/// This function allows a runtime `buf_align` value. Callers should generally prefer
/// to call `shrink` directly.
pub fn shrinkBytes(
- self: *Allocator,
+ self: Allocator,
buf: []u8,
buf_align: u29,
new_len: usize,
@@ -479,5 +514,5 @@ pub fn shrinkBytes(
return_address: usize,
) usize {
assert(new_len <= buf.len);
- return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable;
+ return self.resizeFn(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
}
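To illustrate the new `Allocator.init` machinery end to end, here is a hypothetical third-party allocator written against it. `CountingAllocator` and its fields are invented for this sketch; only `Allocator.init`, `allocFn`, `resizeFn`, and `ptr` come from the patch:

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

/// Hypothetical wrapper that counts allocations, written against the
/// type-erased interface introduced above.
const CountingAllocator = struct {
    parent: Allocator,
    count: usize = 0,

    fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        self.count += 1;
        // Forward through the parent's type-erased `ptr`, exactly as the
        // patched std implementations do.
        return self.parent.allocFn(self.parent.ptr, len, ptr_align, len_align, ret_addr);
    }

    fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        return self.parent.resizeFn(self.parent.ptr, buf, buf_align, new_len, len_align, ret_addr);
    }

    pub fn getAllocator(self: *CountingAllocator) Allocator {
        // Allocator.init generates the *c_void thunks at comptime and
        // stores `self` as the type-erased pointer.
        return Allocator.init(self, alloc, resize);
    }
};

test "CountingAllocator (sketch)" {
    var counting = CountingAllocator{ .parent = std.testing.allocator };
    const allocator = counting.getAllocator();

    const buf = try allocator.alloc(u8, 4);
    allocator.free(buf);
    try std.testing.expect(counting.count == 1);
}
```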
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 2e36eacd7fef..a651076aba43 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -59,7 +59,7 @@ pub fn MultiArrayList(comptime S: type) type {
};
}
- pub fn deinit(self: *Slice, gpa: *Allocator) void {
+ pub fn deinit(self: *Slice, gpa: Allocator) void {
var other = self.toMultiArrayList();
other.deinit(gpa);
self.* = undefined;
@@ -106,7 +106,7 @@ pub fn MultiArrayList(comptime S: type) type {
};
/// Release all allocated memory.
- pub fn deinit(self: *Self, gpa: *Allocator) void {
+ pub fn deinit(self: *Self, gpa: Allocator) void {
gpa.free(self.allocatedBytes());
self.* = undefined;
}
@@ -161,7 +161,7 @@ pub fn MultiArrayList(comptime S: type) type {
}
/// Extend the list by 1 element. Allocates more memory as necessary.
- pub fn append(self: *Self, gpa: *Allocator, elem: S) !void {
+ pub fn append(self: *Self, gpa: Allocator, elem: S) !void {
try self.ensureUnusedCapacity(gpa, 1);
self.appendAssumeCapacity(elem);
}
@@ -188,7 +188,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// after and including the specified index back by one and
/// sets the given index to the specified element. May reallocate
/// and invalidate iterators.
- pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void {
+    pub fn insert(self: *Self, gpa: Allocator, index: usize, elem: S) !void {
try self.ensureUnusedCapacity(gpa, 1);
self.insertAssumeCapacity(index, elem);
}
@@ -242,7 +242,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Adjust the list's length to `new_len`.
/// Does not initialize added items, if any.
- pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void {
+ pub fn resize(self: *Self, gpa: Allocator, new_len: usize) !void {
try self.ensureTotalCapacity(gpa, new_len);
self.len = new_len;
}
@@ -250,7 +250,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Attempt to reduce allocated capacity to `new_len`.
/// If `new_len` is greater than zero, this may fail to reduce the capacity,
/// but the data remains intact and the length is updated to new_len.
- pub fn shrinkAndFree(self: *Self, gpa: *Allocator, new_len: usize) void {
+ pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void {
if (new_len == 0) {
gpa.free(self.allocatedBytes());
self.* = .{};
@@ -314,7 +314,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Modify the array so that it can hold at least `new_capacity` items.
/// Implements super-linear growth to achieve amortized O(1) append operations.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureTotalCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
+ pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;
@@ -328,14 +328,14 @@ pub fn MultiArrayList(comptime S: type) type {
/// Modify the array so that it can hold at least `additional_count` **more** items.
/// Invalidates pointers if additional memory is needed.
- pub fn ensureUnusedCapacity(self: *Self, gpa: *Allocator, additional_count: usize) !void {
+ pub fn ensureUnusedCapacity(self: *Self, gpa: Allocator, additional_count: usize) !void {
return self.ensureTotalCapacity(gpa, self.len + additional_count);
}
/// Modify the array so that it can hold exactly `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
/// `new_capacity` must be greater or equal to `len`.
- pub fn setCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
+ pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
assert(new_capacity >= self.len);
const new_bytes = try gpa.allocAdvanced(
u8,
@@ -372,7 +372,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// Create a copy of this list with a new backing store,
/// using the specified allocator.
- pub fn clone(self: Self, gpa: *Allocator) !Self {
+ pub fn clone(self: Self, gpa: Allocator) !Self {
var result = Self{};
errdefer result.deinit(gpa);
try result.ensureTotalCapacity(gpa, self.len);
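
Every `MultiArrayList` method above now takes `gpa: Allocator` by value. A short usage sketch under those signatures; the `Point` struct is invented for illustration:

    const std = @import("std");

    const Point = struct { x: f32, y: f32 };

    test "MultiArrayList with a by-value Allocator" {
        const gpa = std.testing.allocator;

        var list = std.MultiArrayList(Point){};
        defer list.deinit(gpa); // the allocator is passed by value, not as *Allocator

        try list.ensureUnusedCapacity(gpa, 2);
        try list.append(gpa, .{ .x = 1.0, .y = 2.0 });
        try list.append(gpa, .{ .x = 3.0, .y = 4.0 });
    }
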
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 759adaa75673..4f5ce84034b0 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -664,7 +664,7 @@ pub const AddressList = struct {
};
/// All memory allocated with `allocator` will be freed before this function returns.
-pub fn tcpConnectToHost(allocator: *mem.Allocator, name: []const u8, port: u16) !Stream {
+pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) !Stream {
const list = try getAddressList(allocator, name, port);
defer list.deinit();
@@ -699,12 +699,12 @@ pub fn tcpConnectToAddress(address: Address) !Stream {
}
/// Call `AddressList.deinit` on the result.
-pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*AddressList {
+pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*AddressList {
const result = blk: {
var arena = std.heap.ArenaAllocator.init(allocator);
errdefer arena.deinit();
- const result = try arena.allocator.create(AddressList);
+ const result = try arena.getAllocator().create(AddressList);
result.* = AddressList{
.arena = arena,
.addrs = undefined,
@@ -712,7 +712,7 @@ pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*
};
break :blk result;
};
- const arena = &result.arena.allocator;
+ const arena = result.arena.getAllocator();
errdefer result.arena.deinit();
if (builtin.target.os.tag == .windows or builtin.link_libc) {
@@ -1303,7 +1303,7 @@ const ResolvConf = struct {
/// Ignores lines longer than 512 bytes.
/// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
-fn getResolvConf(allocator: *mem.Allocator, rc: *ResolvConf) !void {
+fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
rc.* = ResolvConf{
.ns = std.ArrayList(LookupAddr).init(allocator),
.search = std.ArrayList(u8).init(allocator),
diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig
index 1742fb294713..f181bb49eaab 100644
--- a/lib/std/net/test.zig
+++ b/lib/std/net/test.zig
@@ -230,7 +230,7 @@ test "listen on ipv4 try connect on ipv6 then ipv4" {
try await client_frame;
}
-fn testClientToHost(allocator: *mem.Allocator, name: []const u8, port: u16) anyerror!void {
+fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const connection = try net.tcpConnectToHost(allocator, name, port);
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index d3c8d13bd123..fb5105706c4f 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -58,10 +58,11 @@ test "open smoke test" {
// Get base abs path
var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
+ const allocator = arena.getAllocator();
const base_path = blk: {
- const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
- break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+ const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+ break :blk try fs.realpathAlloc(allocator, relative_path);
};
var file_path: []u8 = undefined;
@@ -69,34 +70,34 @@ test "open smoke test" {
const mode: os.mode_t = if (native_os == .windows) 0 else 0o666;
// Create some file using `open`.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
fd = try os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode);
os.close(fd);
// Try this again with the same flags. This op should fail with error.PathAlreadyExists.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
try expectError(error.PathAlreadyExists, os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode));
// Try opening without `O.EXCL` flag.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
fd = try os.open(file_path, os.O.RDWR | os.O.CREAT, mode);
os.close(fd);
// Try opening as a directory which should fail.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
try expectError(error.NotDir, os.open(file_path, os.O.RDWR | os.O.DIRECTORY, mode));
// Create some directory
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
try os.mkdir(file_path, mode);
// Open dir using `open`
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
fd = try os.open(file_path, os.O.RDONLY | os.O.DIRECTORY, mode);
os.close(fd);
// Try opening as file which should fail.
- file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+ file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
try expectError(error.IsDir, os.open(file_path, os.O.RDWR, mode));
}
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index 26688d028e04..0a484fed314f 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -460,7 +460,7 @@ pub const PDBStringTableHeader = packed struct {
ByteSize: u32,
};
-fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
+fn readSparseBitVector(stream: anytype, allocator: mem.Allocator) ![]u32 {
const num_words = try stream.readIntLittle(u32);
var list = ArrayList(u32).init(allocator);
errdefer list.deinit();
@@ -481,7 +481,7 @@ fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
pub const Pdb = struct {
in_file: File,
msf: Msf,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
string_table: ?*MsfStream,
dbi: ?*MsfStream,
modules: []Module,
@@ -500,7 +500,7 @@ pub const Pdb = struct {
checksum_offset: ?usize,
};
- pub fn init(allocator: *mem.Allocator, path: []const u8) !Pdb {
+ pub fn init(allocator: mem.Allocator, path: []const u8) !Pdb {
const file = try fs.cwd().openFile(path, .{ .intended_io_mode = .blocking });
errdefer file.close();
@@ -858,7 +858,7 @@ const Msf = struct {
directory: MsfStream,
streams: []MsfStream,
- fn init(allocator: *mem.Allocator, file: File) !Msf {
+ fn init(allocator: mem.Allocator, file: File) !Msf {
const in = file.reader();
const superblock = try in.readStruct(SuperBlock);
diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig
index be81abd96ccc..289ad9480ff1 100644
--- a/lib/std/priority_dequeue.zig
+++ b/lib/std/priority_dequeue.zig
@@ -21,10 +21,10 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
items: []T,
len: usize,
- allocator: *Allocator,
+ allocator: Allocator,
/// Initialize and return a new priority dequeue.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.len = 0,
@@ -336,7 +336,7 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
/// Dequeue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// De-initialize with `deinit`.
- pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
@@ -945,7 +945,7 @@ fn fuzzTestMinMax(rng: std.rand.Random, queue_size: usize) !void {
}
}
-fn generateRandomSlice(allocator: *std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
+fn generateRandomSlice(allocator: std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
var array = std.ArrayList(u32).init(allocator);
try array.ensureTotalCapacity(size);
diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig
index 6d4b6634a465..1ae958f4e4cf 100644
--- a/lib/std/priority_queue.zig
+++ b/lib/std/priority_queue.zig
@@ -20,10 +20,10 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
items: []T,
len: usize,
- allocator: *Allocator,
+ allocator: Allocator,
/// Initialize and return a priority queue.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.len = 0,
@@ -153,7 +153,7 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
/// PriorityQueue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit`.
- pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 0e7b5b25ec31..6b45a7e7aa34 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -21,7 +21,7 @@ pub fn getCwd(out_buffer: []u8) ![]u8 {
}
/// Caller must free the returned memory.
-pub fn getCwdAlloc(allocator: *Allocator) ![]u8 {
+pub fn getCwdAlloc(allocator: Allocator) ![]u8 {
// The use of MAX_PATH_BYTES here is just a heuristic: most paths will fit
// in stack_buf, avoiding an extra allocation in the common case.
var stack_buf: [fs.MAX_PATH_BYTES]u8 = undefined;
@@ -54,7 +54,7 @@ test "getCwdAlloc" {
}
/// Caller owns resulting `BufMap`.
-pub fn getEnvMap(allocator: *Allocator) !BufMap {
+pub fn getEnvMap(allocator: Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
@@ -154,7 +154,7 @@ pub const GetEnvVarOwnedError = error{
};
/// Caller must free returned memory.
-pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
+pub fn getEnvVarOwned(allocator: mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
if (builtin.os.tag == .windows) {
const result_w = blk: {
const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key);
@@ -183,10 +183,10 @@ pub fn hasEnvVarConstant(comptime key: []const u8) bool {
}
}
-pub fn hasEnvVar(allocator: *Allocator, key: []const u8) error{OutOfMemory}!bool {
+pub fn hasEnvVar(allocator: Allocator, key: []const u8) error{OutOfMemory}!bool {
if (builtin.os.tag == .windows) {
var stack_alloc = std.heap.stackFallback(256 * @sizeOf(u16), allocator);
- const key_w = try std.unicode.utf8ToUtf16LeWithNull(&stack_alloc.allocator, key);
- defer stack_alloc.allocator.free(key_w);
+ const stack_allocator = stack_alloc.get();
+ const key_w = try std.unicode.utf8ToUtf16LeWithNull(stack_allocator, key);
+ defer stack_allocator.free(key_w);
return std.os.getenvW(key_w) != null;
} else {
@@ -227,7 +227,7 @@ pub const ArgIteratorPosix = struct {
};
pub const ArgIteratorWasi = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
index: usize,
args: [][:0]u8,
@@ -235,7 +235,7 @@ pub const ArgIteratorWasi = struct {
/// You must call deinit to free the internal buffer of the
/// iterator after you are done.
- pub fn init(allocator: *mem.Allocator) InitError!ArgIteratorWasi {
+ pub fn init(allocator: mem.Allocator) InitError!ArgIteratorWasi {
const fetched_args = try ArgIteratorWasi.internalInit(allocator);
return ArgIteratorWasi{
.allocator = allocator,
@@ -244,7 +244,7 @@ pub const ArgIteratorWasi = struct {
};
}
- fn internalInit(allocator: *mem.Allocator) InitError![][:0]u8 {
+ fn internalInit(allocator: mem.Allocator) InitError![][:0]u8 {
const w = os.wasi;
var count: usize = undefined;
var buf_size: usize = undefined;
@@ -325,7 +325,7 @@ pub const ArgIteratorWindows = struct {
}
/// You must free the returned memory when done.
- pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![:0]u8) {
+ pub fn next(self: *ArgIteratorWindows, allocator: Allocator) ?(NextError![:0]u8) {
// march forward over whitespace
while (true) : (self.index += 1) {
const character = self.getPointAtIndex();
@@ -379,7 +379,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![:0]u8 {
+ fn internalNext(self: *ArgIteratorWindows, allocator: Allocator) NextError![:0]u8 {
var buf = std.ArrayList(u16).init(allocator);
defer buf.deinit();
@@ -423,7 +423,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn convertFromWindowsCmdLineToUTF8(allocator: *Allocator, buf: []u16) NextError![:0]u8 {
+ fn convertFromWindowsCmdLineToUTF8(allocator: Allocator, buf: []u16) NextError![:0]u8 {
return std.unicode.utf16leToUtf8AllocZ(allocator, buf) catch |err| switch (err) {
error.ExpectedSecondSurrogateHalf,
error.DanglingSurrogateHalf,
@@ -463,7 +463,7 @@ pub const ArgIterator = struct {
pub const InitError = ArgIteratorWasi.InitError;
/// You must deinitialize iterator's internal buffers by calling `deinit` when done.
- pub fn initWithAllocator(allocator: *mem.Allocator) InitError!ArgIterator {
+ pub fn initWithAllocator(allocator: mem.Allocator) InitError!ArgIterator {
if (builtin.os.tag == .wasi and !builtin.link_libc) {
return ArgIterator{ .inner = try InnerType.init(allocator) };
}
@@ -474,7 +474,7 @@ pub const ArgIterator = struct {
pub const NextError = ArgIteratorWindows.NextError;
/// You must free the returned memory when done.
- pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![:0]u8) {
+ pub fn next(self: *ArgIterator, allocator: Allocator) ?(NextError![:0]u8) {
if (builtin.os.tag == .windows) {
return self.inner.next(allocator);
} else {
@@ -513,7 +513,7 @@ pub fn args() ArgIterator {
}
/// You must deinitialize iterator's internal buffers by calling `deinit` when done.
-pub fn argsWithAllocator(allocator: *mem.Allocator) ArgIterator.InitError!ArgIterator {
+pub fn argsWithAllocator(allocator: mem.Allocator) ArgIterator.InitError!ArgIterator {
return ArgIterator.initWithAllocator(allocator);
}
@@ -539,7 +539,7 @@ test "args iterator" {
}
/// Caller must call argsFree on result.
-pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
+pub fn argsAlloc(allocator: mem.Allocator) ![][:0]u8 {
// TODO refactor to only make 1 allocation.
var it = if (builtin.os.tag == .wasi) try argsWithAllocator(allocator) else args();
defer it.deinit();
@@ -579,7 +579,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
return result_slice_list;
}
-pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const [:0]u8) void {
+pub fn argsFree(allocator: mem.Allocator, args_alloc: []const [:0]u8) void {
var total_bytes: usize = 0;
for (args_alloc) |arg| {
total_bytes += @sizeOf([]u8) + arg.len + 1;
@@ -741,7 +741,7 @@ pub fn getBaseAddress() usize {
/// requirement from `std.zig.system.NativeTargetInfo.detect`. Most likely this will require
/// introducing a new, lower-level function which takes a callback function, and then this
/// function which takes an allocator can exist on top of it.
-pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 {
+pub fn getSelfExeSharedLibPaths(allocator: Allocator) error{OutOfMemory}![][:0]u8 {
switch (builtin.link_mode) {
.Static => return &[_][:0]u8{},
.Dynamic => {},
@@ -833,7 +833,7 @@ pub const ExecvError = std.os.ExecveError || error{OutOfMemory};
/// This function also uses the PATH environment variable to get the full path to the executable.
/// Due to the heap-allocation, it is illegal to call this function in a fork() child.
/// For that use case, use the `std.os` functions directly.
-pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
+pub fn execv(allocator: mem.Allocator, argv: []const []const u8) ExecvError {
return execve(allocator, argv, null);
}
@@ -846,7 +846,7 @@ pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
/// Due to the heap-allocation, it is illegal to call this function in a fork() child.
/// For that use case, use the `std.os` functions directly.
pub fn execve(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
argv: []const []const u8,
env_map: ?*const std.BufMap,
) ExecvError {
@@ -854,7 +854,7 @@ pub fn execve(
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig
index 37b783771f39..ab844fef57dc 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/std/special/build_runner.zig
@@ -16,7 +16,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
var args = try process.argsAlloc(allocator);
defer process.argsFree(allocator, args);
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index e72204377fef..f90e8aa58ec0 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -10,7 +10,7 @@ var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined;
var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer);
fn processArgs() void {
- const args = std.process.argsAlloc(&args_allocator.allocator) catch {
+ const args = std.process.argsAlloc(args_allocator.getAllocator()) catch {
@panic("Too many bytes passed over the CLI to the test runner");
};
if (args.len != 2) {
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 8a7fb923de17..3f44b19bc2ba 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -1323,15 +1323,15 @@ pub const Target = struct {
pub const stack_align = 16;
- pub fn zigTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
+ pub fn zigTriple(self: Target, allocator: mem.Allocator) ![]u8 {
return std.zig.CrossTarget.fromTarget(self).zigTriple(allocator);
}
- pub fn linuxTripleSimple(allocator: *mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
+ pub fn linuxTripleSimple(allocator: mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(cpu_arch), @tagName(os_tag), @tagName(abi) });
}
- pub fn linuxTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
+ pub fn linuxTriple(self: Target, allocator: mem.Allocator) ![]u8 {
return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi);
}
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 53fc05f6dbc0..b588abbd8caf 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -7,11 +7,11 @@ const print = std.debug.print;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
/// This should only be used in temporary test programs.
-pub const allocator = &allocator_instance.allocator;
+pub const allocator = allocator_instance.getAllocator();
pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
-pub const failing_allocator = &failing_allocator_instance.allocator;
-pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
+pub const failing_allocator = failing_allocator_instance.getAllocator();
+pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.getAllocator(), 0);
pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index d8b243d0fae5..137af925ad69 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -12,10 +12,9 @@ const mem = std.mem;
/// Then use `failing_allocator` anywhere you would have used a
/// different allocator.
pub const FailingAllocator = struct {
- allocator: mem.Allocator,
index: usize,
fail_index: usize,
- internal_allocator: *mem.Allocator,
+ internal_allocator: mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
allocations: usize,
@@ -29,7 +28,7 @@ pub const FailingAllocator = struct {
/// var a = try failing_alloc.create(i32);
/// var b = try failing_alloc.create(i32);
/// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
- pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
+ pub fn init(allocator: mem.Allocator, fail_index: usize) FailingAllocator {
return FailingAllocator{
.internal_allocator = allocator,
.fail_index = fail_index,
@@ -38,25 +37,24 @@ pub const FailingAllocator = struct {
.freed_bytes = 0,
.allocations = 0,
.deallocations = 0,
- .allocator = mem.Allocator{
- .allocFn = alloc,
- .resizeFn = resize,
- },
};
}
+ pub fn getAllocator(self: *FailingAllocator) mem.Allocator {
+ return mem.Allocator.init(self, alloc, resize);
+ }
+
fn alloc(
- allocator: *std.mem.Allocator,
+ self: *FailingAllocator,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
- const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address);
+ const result = try self.internal_allocator.allocFn(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
self.allocated_bytes += result.len;
self.allocations += 1;
self.index += 1;
@@ -64,15 +62,14 @@ pub const FailingAllocator = struct {
}
fn resize(
- allocator: *std.mem.Allocator,
+ self: *FailingAllocator,
old_mem: []u8,
old_align: u29,
new_len: usize,
len_align: u29,
ra: usize,
) error{OutOfMemory}!usize {
- const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
- const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| {
+ const r = self.internal_allocator.resizeFn(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
std.debug.assert(new_len > old_mem.len);
return e;
};
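
The `FailingAllocator` conversion above is the template this patch applies to every allocator implementation: drop the embedded `mem.Allocator` field, take `*Self` directly in `alloc`/`resize`, and expose a `getAllocator()` that packs the instance pointer and the two methods into an `Allocator` value via `mem.Allocator.init`. A sketch of a wrapper written in the same style; the `CountingAllocator` name and behavior are invented for illustration, and the `allocFn`/`resizeFn` forwarding mirrors the hunk above:

    const std = @import("std");
    const mem = std.mem;

    /// Wraps another allocator and counts successful allocations.
    pub const CountingAllocator = struct {
        backing: mem.Allocator,
        count: usize = 0,

        pub fn getAllocator(self: *CountingAllocator) mem.Allocator {
            return mem.Allocator.init(self, alloc, resize);
        }

        fn alloc(
            self: *CountingAllocator,
            len: usize,
            ptr_align: u29,
            len_align: u29,
            return_address: usize,
        ) error{OutOfMemory}![]u8 {
            // Forward through the type-erased ptr, as FailingAllocator does.
            const result = try self.backing.allocFn(self.backing.ptr, len, ptr_align, len_align, return_address);
            self.count += 1;
            return result;
        }

        fn resize(
            self: *CountingAllocator,
            old_mem: []u8,
            old_align: u29,
            new_len: usize,
            len_align: u29,
            ra: usize,
        ) error{OutOfMemory}!usize {
            return self.backing.resizeFn(self.backing.ptr, old_mem, old_align, new_len, len_align, ra);
        }
    };
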
diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig
index 947a1030bb86..0bd7f378326a 100644
--- a/lib/std/unicode.zig
+++ b/lib/std/unicode.zig
@@ -550,7 +550,7 @@ fn testDecode(bytes: []const u8) !u21 {
}
/// Caller must free returned memory.
-pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
+pub fn utf16leToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) ![]u8 {
// optimistically guess that it will all be ascii.
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
errdefer result.deinit();
@@ -567,7 +567,7 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
}
/// Caller must free returned memory.
-pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 {
+pub fn utf16leToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) ![:0]u8 {
// optimistically guess that it will all be ascii.
var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
errdefer result.deinit();
@@ -661,7 +661,7 @@ test "utf16leToUtf8" {
}
}
-pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 {
+pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u16 {
// optimistically guess that it will not require surrogate pairs
var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1);
errdefer result.deinit();
diff --git a/lib/std/wasm.zig b/lib/std/wasm.zig
index f2ae8d34f671..f96c1bc1b9b9 100644
--- a/lib/std/wasm.zig
+++ b/lib/std/wasm.zig
@@ -361,7 +361,7 @@ pub const Type = struct {
std.mem.eql(Valtype, self.returns, other.returns);
}
- pub fn deinit(self: *Type, gpa: *std.mem.Allocator) void {
+ pub fn deinit(self: *Type, gpa: std.mem.Allocator) void {
gpa.free(self.params);
gpa.free(self.returns);
self.* = undefined;
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index 56981a74acaf..1420db8ec2fc 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -100,7 +100,7 @@ pub const BinNameOptions = struct {
};
/// Returns the standard file system basename of a binary generated by the Zig compiler.
-pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
+pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
const root_name = options.root_name;
const target = options.target;
const ofmt = options.object_format orelse target.getObjectFormat();
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 02672fbfd1c6..7729805c88e9 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -34,7 +34,7 @@ pub const Location = struct {
line_end: usize,
};
-pub fn deinit(tree: *Tree, gpa: *mem.Allocator) void {
+pub fn deinit(tree: *Tree, gpa: mem.Allocator) void {
tree.tokens.deinit(gpa);
tree.nodes.deinit(gpa);
gpa.free(tree.extra_data);
@@ -52,7 +52,7 @@ pub const RenderError = error{
/// for allocating extra stack memory if needed, because this function utilizes recursion.
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
-pub fn render(tree: Tree, gpa: *mem.Allocator) RenderError![]u8 {
+pub fn render(tree: Tree, gpa: mem.Allocator) RenderError![]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig
index 3c6057a8d902..03bb6bc5ffe6 100644
--- a/lib/std/zig/CrossTarget.zig
+++ b/lib/std/zig/CrossTarget.zig
@@ -520,7 +520,7 @@ pub fn isNative(self: CrossTarget) bool {
return self.isNativeCpu() and self.isNativeOs() and self.isNativeAbi();
}
-pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory}![]u8 {
+pub fn zigTriple(self: CrossTarget, allocator: mem.Allocator) error{OutOfMemory}![]u8 {
if (self.isNative()) {
return allocator.dupe(u8, "native");
}
@@ -559,13 +559,13 @@ pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory
return result.toOwnedSlice();
}
-pub fn allocDescription(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
+pub fn allocDescription(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
// TODO is there anything else worthy of the description that is not
// already captured in the triple?
return self.zigTriple(allocator);
}
-pub fn linuxTriple(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
+pub fn linuxTriple(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
return Target.linuxTripleSimple(allocator, self.getCpuArch(), self.getOsTag(), self.getAbi());
}
@@ -576,7 +576,7 @@ pub fn wantSharedLibSymLinks(self: CrossTarget) bool {
pub const VcpkgLinkage = std.builtin.LinkMode;
/// Returned slice must be freed by the caller.
-pub fn vcpkgTriplet(self: CrossTarget, allocator: *mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
+pub fn vcpkgTriplet(self: CrossTarget, allocator: mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
const arch = switch (self.getCpuArch()) {
.i386 => "x86",
.x86_64 => "x64",
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 28a0c1a19635..89abb3500666 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -11,7 +11,7 @@ pub const Error = error{ParseError} || Allocator.Error;
/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
-pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
+pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
var tokens = Ast.TokenList{};
defer tokens.deinit(gpa);
@@ -81,7 +81,7 @@ const null_node: Node.Index = 0;
/// Represents in-progress parsing, will be converted to an Ast after completion.
const Parser = struct {
- gpa: *Allocator,
+ gpa: Allocator,
source: []const u8,
token_tags: []const Token.Tag,
token_starts: []const Ast.ByteOffset,
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index fb1a8120d4de..0fb435791762 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1220,7 +1220,7 @@ test "zig fmt: doc comments on param decl" {
try testCanonical(
\\pub const Allocator = struct {
\\ shrinkFn: fn (
- \\ self: *Allocator,
+ \\ self: Allocator,
\\ /// Guaranteed to be the same as what was returned from most recent call to
\\ /// `allocFn`, `reallocFn`, or `shrinkFn`.
\\ old_mem: []u8,
@@ -4250,7 +4250,7 @@ test "zig fmt: Only indent multiline string literals in function calls" {
test "zig fmt: Don't add extra newline after if" {
try testCanonical(
- \\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+ \\pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
\\ if (cwd().symLink(existing_path, new_path, .{})) {
\\ return;
\\ }
@@ -5319,7 +5319,7 @@ const maxInt = std.math.maxInt;
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn testParse(source: [:0]const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
+fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
const stderr = io.getStdErr().writer();
var tree = try std.zig.parse(allocator, source);
@@ -5351,9 +5351,10 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
const needed_alloc_count = x: {
// Try it once with unlimited memory, make sure it works
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
+ var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), maxInt(usize));
+ const allocator = failing_allocator.getAllocator();
var anything_changed: bool = undefined;
- const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
+ const result_source = try testParse(source, allocator, &anything_changed);
try std.testing.expectEqualStrings(expected_source, result_source);
const changes_expected = source.ptr != expected_source.ptr;
if (anything_changed != changes_expected) {
@@ -5361,16 +5362,16 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
return error.TestFailed;
}
try std.testing.expect(anything_changed == changes_expected);
- failing_allocator.allocator.free(result_source);
+ allocator.free(result_source);
break :x failing_allocator.index;
};
var fail_index: usize = 0;
while (fail_index < needed_alloc_count) : (fail_index += 1) {
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
+ var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), fail_index);
var anything_changed: bool = undefined;
- if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
+ if (testParse(source, failing_allocator.getAllocator(), &anything_changed)) |_| {
return error.NondeterministicMemoryUsage;
} else |err| switch (err) {
error.OutOfMemory => {
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index b6f513cc0a54..d2286914b048 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -33,7 +33,7 @@ pub fn main() !void {
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var allocator = &fixed_buf_alloc.allocator;
+ var allocator = fixed_buf_alloc.getAllocator();
_ = std.zig.parse(allocator, source) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 8a909bf562aa..a703e1f3ea5f 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -37,7 +37,7 @@ pub fn renderTree(buffer: *std.ArrayList(u8), tree: Ast) Error!void {
}
/// Render all members in the given slice, keeping empty lines where appropriate
-fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
+fn renderMembers(gpa: Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
if (members.len == 0) return;
try renderMember(gpa, ais, tree, members[0], .newline);
for (members[1..]) |member| {
@@ -46,7 +46,7 @@ fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Nod
}
}
-fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
+fn renderMember(gpa: Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
const datas = tree.nodes.items(.data);
@@ -168,7 +168,7 @@ fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, spa
}
/// Render all expressions in the slice, keeping empty lines where appropriate
-fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
+fn renderExpressions(gpa: Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
if (expressions.len == 0) return;
try renderExpression(gpa, ais, tree, expressions[0], space);
for (expressions[1..]) |expression| {
@@ -177,7 +177,7 @@ fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const
}
}
-fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpression(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const main_tokens = tree.nodes.items(.main_token);
const node_tags = tree.nodes.items(.tag);
@@ -710,7 +710,7 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
}
fn renderArrayType(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
array_type: Ast.full.ArrayType,
@@ -732,7 +732,7 @@ fn renderArrayType(
}
fn renderPtrType(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
ptr_type: Ast.full.PtrType,
@@ -825,7 +825,7 @@ fn renderPtrType(
}
fn renderSlice(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
slice_node: Ast.Node.Index,
@@ -861,7 +861,7 @@ fn renderSlice(
}
fn renderAsmOutput(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_output: Ast.Node.Index,
@@ -891,7 +891,7 @@ fn renderAsmOutput(
}
fn renderAsmInput(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_input: Ast.Node.Index,
@@ -912,7 +912,7 @@ fn renderAsmInput(
return renderToken(ais, tree, datas[asm_input].rhs, space); // rparen
}
-fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
+fn renderVarDecl(gpa: Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
if (var_decl.visib_token) |visib_token| {
try renderToken(ais, tree, visib_token, Space.space); // pub
}
@@ -1019,7 +1019,7 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
return renderToken(ais, tree, var_decl.ast.mut_token + 2, .newline); // ;
}
-fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
+fn renderIf(gpa: Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
return renderWhile(gpa, ais, tree, .{
.ast = .{
.while_token = if_node.ast.if_token,
@@ -1038,7 +1038,7 @@ fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space:
/// Note that this function is additionally used to render if and for expressions, with
/// respective values set to null.
-fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
+fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
const node_tags = tree.nodes.items(.tag);
const token_tags = tree.tokens.items(.tag);
@@ -1141,7 +1141,7 @@ fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While
}
fn renderContainerField(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
field: Ast.full.ContainerField,
@@ -1215,7 +1215,7 @@ fn renderContainerField(
}
fn renderBuiltinCall(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
builtin_token: Ast.TokenIndex,
@@ -1272,7 +1272,7 @@ fn renderBuiltinCall(
}
}
-fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
+fn renderFnProto(gpa: Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const token_starts = tree.tokens.items(.start);
@@ -1488,7 +1488,7 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
}
fn renderSwitchCase(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
switch_case: Ast.full.SwitchCase,
@@ -1541,7 +1541,7 @@ fn renderSwitchCase(
}
fn renderBlock(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
block_node: Ast.Node.Index,
@@ -1581,7 +1581,7 @@ fn renderBlock(
}
fn renderStructInit(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
struct_node: Ast.Node.Index,
@@ -1640,7 +1640,7 @@ fn renderStructInit(
}
fn renderArrayInit(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
array_init: Ast.full.ArrayInit,
@@ -1859,7 +1859,7 @@ fn renderArrayInit(
}
fn renderContainerDecl(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
container_decl_node: Ast.Node.Index,
@@ -1956,7 +1956,7 @@ fn renderContainerDecl(
}
fn renderAsm(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
asm_node: Ast.full.Asm,
@@ -2105,7 +2105,7 @@ fn renderAsm(
}
fn renderCall(
- gpa: *Allocator,
+ gpa: Allocator,
ais: *Ais,
tree: Ast,
call: Ast.full.Call,
@@ -2180,7 +2180,7 @@ fn renderCall(
/// Renders the given expression indented, popping the indent before rendering
/// any following line comments
-fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionIndented(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_starts = tree.tokens.items(.start);
const token_tags = tree.tokens.items(.tag);
@@ -2238,7 +2238,7 @@ fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Nod
/// Render an expression, and the comma that follows it, if it is present in the source.
/// If a comma is present, and `space` is `Space.comma`, render only a single comma.
-fn renderExpressionComma(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionComma(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
const token_tags = tree.tokens.items(.tag);
const maybe_comma = tree.lastToken(node) + 1;
if (token_tags[maybe_comma] == .comma and space != .comma) {
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index 2a38195b1f6f..5e44e5f8f3a0 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -131,7 +131,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
/// Higher level API. Does not return extra info about parse errors.
/// Caller owns returned memory.
-pub fn parseAlloc(allocator: *std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
+pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
@@ -147,7 +147,7 @@ test "parse" {
var fixed_buf_mem: [32]u8 = undefined;
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
- var alloc = &fixed_buf_alloc.allocator;
+ var alloc = fixed_buf_alloc.getAllocator();
try expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\"")));
try expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\"")));
diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig
index 353ad2509682..5ba0d8198c2a 100644
--- a/lib/std/zig/system.zig
+++ b/lib/std/zig/system.zig
@@ -21,7 +21,7 @@ pub const NativePaths = struct {
rpaths: ArrayList([:0]u8),
warnings: ArrayList([:0]u8),
- pub fn detect(allocator: *Allocator, native_info: NativeTargetInfo) !NativePaths {
+ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths {
const native_target = native_info.target;
var self: NativePaths = .{
@@ -237,7 +237,7 @@ pub const NativeTargetInfo = struct {
/// Any resources this function allocates are released before returning, and so there is no
/// deinitialization method.
/// TODO Remove the Allocator requirement from this function.
- pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
+ pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
var os = cross_target.getOsTag().defaultVersionRange(cross_target.getCpuArch());
if (cross_target.os_tag == null) {
switch (builtin.target.os.tag) {
@@ -441,7 +441,7 @@ pub const NativeTargetInfo = struct {
/// we fall back to the defaults.
/// TODO Remove the Allocator requirement from this function.
fn detectAbiAndDynamicLinker(
- allocator: *Allocator,
+ allocator: Allocator,
cpu: Target.Cpu,
os: Target.Os,
cross_target: CrossTarget,
diff --git a/lib/std/zig/system/darwin.zig b/lib/std/zig/system/darwin.zig
index 5ce769a79210..c20607440d2e 100644
--- a/lib/std/zig/system/darwin.zig
+++ b/lib/std/zig/system/darwin.zig
@@ -11,7 +11,7 @@ pub const macos = @import("darwin/macos.zig");
/// Therefore, we resort to the same tool used by Homebrew, namely, invoking `xcode-select --print-path`
/// and checking if the status is nonzero or the returned string is nonempty.
/// https://github.com/Homebrew/brew/blob/e119bdc571dcb000305411bc1e26678b132afb98/Library/Homebrew/brew.sh#L630
-pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
+pub fn isDarwinSDKInstalled(allocator: Allocator) bool {
const argv = &[_][]const u8{ "/usr/bin/xcode-select", "--print-path" };
const result = std.ChildProcess.exec(.{ .allocator = allocator, .argv = argv }) catch return false;
defer {
@@ -29,7 +29,7 @@ pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
/// Calls `xcrun --sdk
diff --git a/doc/langref.html.in b/doc/langref.html.in
malloc, realloc, and free.
When linking against libc, Zig exposes this allocator with {#syntax#}std.heap.c_allocator{#endsyntax#}.
However, by convention, there is no default allocator in Zig. Instead, functions which need to
- allocate accept an {#syntax#}*Allocator{#endsyntax#} parameter. Likewise, data structures such as
- {#syntax#}std.ArrayList{#endsyntax#} accept an {#syntax#}*Allocator{#endsyntax#} parameter in
+ allocate accept an {#syntax#}Allocator{#endsyntax#} parameter. Likewise, data structures such as
+ {#syntax#}std.ArrayList{#endsyntax#} accept an {#syntax#}Allocator{#endsyntax#} parameter in
their initialization functions:
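
In the langref this sentence introduces a code sample; a minimal one in the same spirit, using the post-patch by-value parameter (the `greet` helper is hypothetical, not part of the patch):

    const std = @import("std");

    // A library function: the caller decides where the bytes come from.
    fn greet(allocator: std.mem.Allocator, name: []const u8) ![]u8 {
        return std.fmt.allocPrint(allocator, "hello, {s}", .{name});
    }

    test "functions and containers take an Allocator parameter" {
        const allocator = std.testing.allocator;

        const msg = try greet(allocator, "zig");
        defer allocator.free(msg);

        var list = std.ArrayList(u8).init(allocator);
        defer list.deinit();
        try list.appendSlice(msg);
    }
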
For example, the function's documentation may say "caller owns the returned memory", in which case
the code that calls the function must have a plan for when to free that memory. Probably in this situation,
- the function will accept an {#syntax#}*Allocator{#endsyntax#} parameter.
+ the function will accept an {#syntax#}Allocator{#endsyntax#} parameter.
Sometimes the lifetime of a pointer may be more complicated. For example, the
@@ -10820,7 +10820,7 @@ const std = @import("std");
pub fn main() !void {
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
- const gpa = &general_purpose_allocator.allocator;
+ const gpa = general_purpose_allocator.getAllocator();
const args = try std.process.argsAlloc(gpa);
defer std.process.argsFree(gpa, args);
@@ -10842,7 +10842,7 @@ const PreopenList = std.fs.wasi.PreopenList;
pub fn main() !void {
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
- const gpa = &general_purpose_allocator.allocator;
+ const gpa = general_purpose_allocator.getAllocator();
var preopens = PreopenList.init(gpa);
defer preopens.deinit();
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 413b23cd48af..191ed4de94df 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -173,12 +173,12 @@ pub const Loop = struct {
// We need at least one of these in case the fs thread wants to use onNextTick
const extra_thread_count = thread_count - 1;
const resume_node_count = std.math.max(extra_thread_count, 1);
- self.eventfd_resume_nodes = try self.arena.allocator.alloc(
+ self.eventfd_resume_nodes = try self.arena.getAllocator().alloc(
std.atomic.Stack(ResumeNode.EventFd).Node,
resume_node_count,
);
- self.extra_threads = try self.arena.allocator.alloc(Thread, extra_thread_count);
+ self.extra_threads = try self.arena.getAllocator().alloc(Thread, extra_thread_count);
try self.initOsData(extra_thread_count);
errdefer self.deinitOsData();
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 5cc7f8ef6581..2e58c8c5d9a4 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -98,7 +98,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
var astgen: AstGen = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.tree = &tree,
};
defer astgen.deinit(gpa);
@@ -1939,6 +1939,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa);
defer block_arena.deinit();
+ const block_arena_allocator = block_arena.getAllocator();
var noreturn_src_node: Ast.Node.Index = 0;
var scope = parent_scope;
@@ -1959,13 +1960,13 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod
}
switch (node_tags[statement]) {
// zig fmt: off
- .global_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.globalVarDecl(statement)),
- .local_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.localVarDecl(statement)),
- .simple_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.simpleVarDecl(statement)),
- .aligned_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.alignedVarDecl(statement)),
+ .global_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.globalVarDecl(statement)),
+ .local_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.localVarDecl(statement)),
+ .simple_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.simpleVarDecl(statement)),
+ .aligned_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.alignedVarDecl(statement)),
- .@"defer" => scope = try makeDeferScope(gz.astgen, scope, statement, &block_arena.allocator, .defer_normal),
- .@"errdefer" => scope = try makeDeferScope(gz.astgen, scope, statement, &block_arena.allocator, .defer_error),
+ .@"defer" => scope = try makeDeferScope(gz.astgen, scope, statement, block_arena_allocator, .defer_normal),
+ .@"errdefer" => scope = try makeDeferScope(gz.astgen, scope, statement, block_arena_allocator, .defer_error),
.assign => try assign(gz, scope, statement),
diff --git a/src/Compilation.zig b/src/Compilation.zig
index f6ee58b5efef..7c855862fd56 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -412,28 +412,29 @@ pub const AllErrors = struct {
errors: *std.ArrayList(Message),
module_err_msg: Module.ErrorMsg,
) !void {
- const notes = try arena.allocator.alloc(Message, module_err_msg.notes.len);
+ const allocator = arena.getAllocator();
+ const notes = try allocator.alloc(Message, module_err_msg.notes.len);
for (notes) |*note, i| {
const module_note = module_err_msg.notes[i];
const source = try module_note.src_loc.file_scope.getSource(module.gpa);
const byte_offset = try module_note.src_loc.byteOffset(module.gpa);
const loc = std.zig.findLineColumn(source, byte_offset);
- const file_path = try module_note.src_loc.file_scope.fullPath(&arena.allocator);
+ const file_path = try module_note.src_loc.file_scope.fullPath(allocator);
note.* = .{
.src = .{
.src_path = file_path,
- .msg = try arena.allocator.dupe(u8, module_note.msg),
+ .msg = try allocator.dupe(u8, module_note.msg),
.byte_offset = byte_offset,
.line = @intCast(u32, loc.line),
.column = @intCast(u32, loc.column),
- .source_line = try arena.allocator.dupe(u8, loc.source_line),
+ .source_line = try allocator.dupe(u8, loc.source_line),
},
};
}
if (module_err_msg.src_loc.lazy == .entire_file) {
try errors.append(.{
.plain = .{
- .msg = try arena.allocator.dupe(u8, module_err_msg.msg),
+ .msg = try allocator.dupe(u8, module_err_msg.msg),
},
});
return;
@@ -441,16 +442,16 @@ pub const AllErrors = struct {
const source = try module_err_msg.src_loc.file_scope.getSource(module.gpa);
const byte_offset = try module_err_msg.src_loc.byteOffset(module.gpa);
const loc = std.zig.findLineColumn(source, byte_offset);
- const file_path = try module_err_msg.src_loc.file_scope.fullPath(&arena.allocator);
+ const file_path = try module_err_msg.src_loc.file_scope.fullPath(allocator);
try errors.append(.{
.src = .{
.src_path = file_path,
- .msg = try arena.allocator.dupe(u8, module_err_msg.msg),
+ .msg = try allocator.dupe(u8, module_err_msg.msg),
.byte_offset = byte_offset,
.line = @intCast(u32, loc.line),
.column = @intCast(u32, loc.column),
.notes = notes,
- .source_line = try arena.allocator.dupe(u8, loc.source_line),
+ .source_line = try allocator.dupe(u8, loc.source_line),
},
});
}
@@ -548,11 +549,12 @@ pub const AllErrors = struct {
msg: []const u8,
optional_children: ?AllErrors,
) !void {
- const duped_msg = try arena.allocator.dupe(u8, msg);
+ const allocator = arena.getAllocator();
+ const duped_msg = try allocator.dupe(u8, msg);
if (optional_children) |*children| {
try errors.append(.{ .plain = .{
.msg = duped_msg,
- .notes = try dupeList(children.list, &arena.allocator),
+ .notes = try dupeList(children.list, allocator),
} });
} else {
try errors.append(.{ .plain = .{ .msg = duped_msg } });
@@ -786,7 +788,7 @@ fn addPackageTableToCacheHash(
seen_table: *std.AutoHashMap(*Package, void),
hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
) (error{OutOfMemory} || std.os.GetCwdError)!void {
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const packages = try allocator.alloc(Package.Table.KV, pkg_table.count());
{
@@ -850,7 +852,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
// initialization and then is freed in deinit().
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
// We put the `Compilation` itself in the arena. Freeing the arena will free the module.
// It's initialized later after we prepare the initialization options.
@@ -1208,7 +1210,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
{
var local_arena = std.heap.ArenaAllocator.init(gpa);
defer local_arena.deinit();
- var seen_table = std.AutoHashMap(*Package, void).init(&local_arena.allocator);
+ var seen_table = std.AutoHashMap(*Package, void).init(local_arena.getAllocator());
try addPackageTableToCacheHash(&hash, &local_arena, main_pkg.table, &seen_table, .path_bytes);
}
hash.add(valgrind);
@@ -2011,6 +2013,7 @@ pub fn totalErrorCount(self: *Compilation) usize {
pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
var arena = std.heap.ArenaAllocator.init(self.gpa);
errdefer arena.deinit();
+ const arena_allocator = arena.getAllocator();
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
defer errors.deinit();
@@ -2024,8 +2027,8 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
// C error reporting bubbling up.
try errors.append(.{
.src = .{
- .src_path = try arena.allocator.dupe(u8, c_object.src.src_path),
- .msg = try std.fmt.allocPrint(&arena.allocator, "unable to build C object: {s}", .{
+ .src_path = try arena_allocator.dupe(u8, c_object.src.src_path),
+ .msg = try std.fmt.allocPrint(arena_allocator, "unable to build C object: {s}", .{
err_msg.msg,
}),
.byte_offset = 0,
@@ -2050,7 +2053,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
// must have completed successfully.
const tree = try entry.key_ptr.*.getTree(module.gpa);
assert(tree.errors.len == 0);
- try AllErrors.addZir(&arena.allocator, &errors, entry.key_ptr.*);
+ try AllErrors.addZir(arena_allocator, &errors, entry.key_ptr.*);
}
}
}
@@ -2089,7 +2092,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
try errors.append(.{
.plain = .{
- .msg = try std.fmt.allocPrint(&arena.allocator, "no entry point found", .{}),
+ .msg = try std.fmt.allocPrint(arena_allocator, "no entry point found", .{}),
},
});
}
@@ -2121,7 +2124,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
assert(errors.items.len == self.totalErrorCount());
return AllErrors{
- .list = try arena.allocator.dupe(AllErrors.Message, errors.items),
+ .list = try arena_allocator.dupe(AllErrors.Message, errors.items),
.arena = arena.state,
};
}
@@ -2292,7 +2295,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
var tmp_arena = std.heap.ArenaAllocator.init(gpa);
defer tmp_arena.deinit();
- const sema_arena = &tmp_arena.allocator;
+ const sema_arena = tmp_arena.getAllocator();
const sema_frame = tracy.namedFrame("sema");
var sema_frame_ended = false;
@@ -2387,7 +2390,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = c_codegen.TypedefMap.init(gpa),
- .typedefs_arena = &typedefs_arena.allocator,
+ .typedefs_arena = typedefs_arena.getAllocator(),
};
defer dg.fwd_decl.deinit();
defer dg.typedefs.deinit();
@@ -2841,7 +2844,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const digest = if (!actual_hit) digest: {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const tmp_digest = man.hash.peek();
const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest });
@@ -3096,7 +3099,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const c_source_basename = std.fs.path.basename(c_object.src.src_path);
@@ -4417,7 +4420,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
// Here we use the legacy stage1 C++ compiler to compile Zig code.
const mod = comp.bin_file.options.module.?;
@@ -4454,7 +4457,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
_ = try man.addFile(main_zig_file, null);
{
- var seen_table = std.AutoHashMap(*Package, void).init(&arena_allocator.allocator);
+ var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.getAllocator());
try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = &man });
}
man.hash.add(comp.bin_file.options.valgrind);
diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig
index 0fd26532f0b9..e99bfc746471 100644
--- a/src/DepTokenizer.zig
+++ b/src/DepTokenizer.zig
@@ -878,7 +878,7 @@ test "error prereq - continuation expecting end-of-line" {
// - tokenize input, emit textual representation, and compare to expect
fn depTokenizer(input: []const u8, expect: []const u8) !void {
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
defer arena_allocator.deinit();
var it: Tokenizer = .{ .bytes = input };
diff --git a/src/Module.zig b/src/Module.zig
index a40dcd14198e..d016418d8d8e 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -517,7 +517,7 @@ pub const Decl = struct {
pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void {
assert(decl.value_arena == null);
- const arena_state = try arena.allocator.create(std.heap.ArenaAllocator.State);
+ const arena_state = try arena.getAllocator().create(std.heap.ArenaAllocator.State);
arena_state.* = arena.state;
decl.value_arena = arena_state;
}
@@ -3159,10 +3159,11 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
const gpa = mod.gpa;
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const struct_obj = try new_decl_arena.allocator.create(Module.Struct);
- const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj);
- const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty);
+ const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
+ const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
+ const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
const ty_ty = comptime Type.initTag(.type);
struct_obj.* = .{
.owner_decl = undefined, // set below
@@ -3202,12 +3203,13 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
var sema_arena = std.heap.ArenaAllocator.init(gpa);
defer sema_arena.deinit();
+ const sema_arena_allocator = sema_arena.getAllocator();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &sema_arena.allocator,
- .perm_arena = &new_decl_arena.allocator,
+ .arena = sema_arena_allocator,
+ .perm_arena = new_decl_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
.func = null,
@@ -3216,7 +3218,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
};
defer sema.deinit();
- var wip_captures = try WipCaptureScope.init(gpa, &new_decl_arena.allocator, null);
+ var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null);
defer wip_captures.deinit();
var block_scope: Sema.Block = .{
@@ -3265,15 +3267,17 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
// We need the memory for the Type to go into the arena for the Decl
var decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer decl_arena.deinit();
+ const decl_arena_allocator = decl_arena.getAllocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
+ const analysis_arena_allocator = analysis_arena.getAllocator();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &analysis_arena.allocator,
- .perm_arena = &decl_arena.allocator,
+ .arena = analysis_arena_allocator,
+ .perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.func = null,
@@ -3296,7 +3300,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
log.debug("semaDecl {*} ({s})", .{ decl, decl.name });
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Sema.Block = .{
@@ -3356,7 +3360,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
// not the struct itself.
try sema.resolveTypeLayout(&block_scope, src, decl_tv.ty);
- const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+ const decl_arena_state = try decl_arena_allocator.create(std.heap.ArenaAllocator.State);
if (decl.is_usingnamespace) {
const ty_ty = Type.initTag(.type);
@@ -3370,7 +3374,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
}
decl.ty = ty_ty;
- decl.val = try Value.Tag.ty.create(&decl_arena.allocator, ty);
+ decl.val = try Value.Tag.ty.create(decl_arena_allocator, ty);
decl.align_val = Value.initTag(.null_value);
decl.linksection_val = Value.initTag(.null_value);
decl.has_tv = true;
@@ -3400,10 +3404,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.clearValues(gpa);
}
- decl.ty = try decl_tv.ty.copy(&decl_arena.allocator);
- decl.val = try decl_tv.val.copy(&decl_arena.allocator);
- decl.align_val = try align_val.copy(&decl_arena.allocator);
- decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
+ decl.ty = try decl_tv.ty.copy(decl_arena_allocator);
+ decl.val = try decl_tv.val.copy(decl_arena_allocator);
+ decl.align_val = try align_val.copy(decl_arena_allocator);
+ decl.linksection_val = try linksection_val.copy(decl_arena_allocator);
decl.@"addrspace" = address_space;
decl.has_tv = true;
decl.owns_tv = owns_tv;
@@ -3453,7 +3457,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.owns_tv = true;
queue_linker_work = true;
- const copied_init = try variable.init.copy(&decl_arena.allocator);
+ const copied_init = try variable.init.copy(decl_arena_allocator);
variable.init = copied_init;
}
},
@@ -3476,10 +3480,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
},
}
- decl.ty = try decl_tv.ty.copy(&decl_arena.allocator);
- decl.val = try decl_tv.val.copy(&decl_arena.allocator);
- decl.align_val = try align_val.copy(&decl_arena.allocator);
- decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
+ decl.ty = try decl_tv.ty.copy(decl_arena_allocator);
+ decl.val = try decl_tv.val.copy(decl_arena_allocator);
+ decl.align_val = try align_val.copy(decl_arena_allocator);
+ decl.linksection_val = try linksection_val.copy(decl_arena_allocator);
decl.@"addrspace" = address_space;
decl.has_tv = true;
decl_arena_state.* = decl_arena.state;
@@ -4128,12 +4132,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
// Use the Decl's arena for captured values.
var decl_arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.getAllocator();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = arena,
- .perm_arena = &decl_arena.allocator,
+ .perm_arena = decl_arena_allocator,
.code = decl.getFileScope().zir,
.owner_decl = decl,
.func = func,
@@ -4147,7 +4152,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem
try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
sema.air_extra.items.len += reserved_count;
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var inner_block: Sema.Block = .{
@@ -4751,7 +4756,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
// decl reference it as a slice.
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const arena = &new_decl_arena.allocator;
+ const arena = new_decl_arena.getAllocator();
const test_fn_vals = try arena.alloc(Value, mod.test_functions.count());
const array_decl = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{
@@ -4770,10 +4775,10 @@ pub fn populateTestFunctions(mod: *Module) !void {
const test_name_decl = n: {
var name_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer name_decl_arena.deinit();
- const bytes = try name_decl_arena.allocator.dupe(u8, test_name_slice);
+ const bytes = try arena.dupe(u8, test_name_slice);
const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{
- .ty = try Type.Tag.array_u8.create(&name_decl_arena.allocator, bytes.len),
- .val = try Value.Tag.bytes.create(&name_decl_arena.allocator, bytes),
+ .ty = try Type.Tag.array_u8.create(arena, bytes.len),
+ .val = try Value.Tag.bytes.create(arena, bytes),
});
try test_name_decl.finalizeNewArena(&name_decl_arena);
break :n test_name_decl;
@@ -4802,7 +4807,7 @@ pub fn populateTestFunctions(mod: *Module) !void {
{
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
- const arena = &new_decl_arena.allocator;
+ const arena = new_decl_arena.getAllocator();
// This copy accesses the old Decl Type/Value so it must be done before `clearValues`.
const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena));
diff --git a/src/Sema.zig b/src/Sema.zig
index ce0c5c8ed75c..9e0aa2f75edb 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -418,7 +418,7 @@ pub const Block = struct {
finished: bool,
pub fn arena(wad: *WipAnonDecl) Allocator {
- return &wad.new_decl_arena.allocator;
+ return wad.new_decl_arena.getAllocator();
}
pub fn deinit(wad: *WipAnonDecl) void {
@@ -1594,10 +1594,11 @@ fn zirStructDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const struct_obj = try new_decl_arena.allocator.create(Module.Struct);
- const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj);
- const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty);
+ const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
+ const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
+ const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -1698,15 +1699,16 @@ fn zirEnumDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const enum_obj = try new_decl_arena.allocator.create(Module.EnumFull);
- const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumFull);
+ const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
+ const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
enum_ty_payload.* = .{
.base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full },
.data = enum_obj,
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
- const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -1790,17 +1792,17 @@ fn zirEnumDecl(
break :blk try sema.resolveType(block, src, tag_type_ref);
}
const bits = std.math.log2_int_ceil(usize, fields_len);
- break :blk try Type.Tag.int_unsigned.create(&new_decl_arena.allocator, bits);
+ break :blk try Type.Tag.int_unsigned.create(new_decl_arena_allocator, bits);
};
enum_obj.tag_ty = tag_ty;
}
- try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
+ try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
if (bag != 0) break true;
} else false;
if (any_values) {
- try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{
+ try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
.ty = enum_obj.tag_ty,
});
}
@@ -1820,7 +1822,7 @@ fn zirEnumDecl(
extra_index += 1;
// This string needs to outlive the ZIR code.
- const field_name = try new_decl_arena.allocator.dupe(u8, field_name_zir);
+ const field_name = try new_decl_arena_allocator.dupe(u8, field_name_zir);
const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name);
if (gop.found_existing) {
@@ -1843,12 +1845,12 @@ fn zirEnumDecl(
// that points to this default value expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val;
- const copied_tag_val = try tag_val.copy(&new_decl_arena.allocator);
+ const copied_tag_val = try tag_val.copy(new_decl_arena_allocator);
enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{
.ty = enum_obj.tag_ty,
});
} else if (any_values) {
- const tag_val = try Value.Tag.int_u64.create(&new_decl_arena.allocator, field_i);
+ const tag_val = try Value.Tag.int_u64.create(new_decl_arena_allocator, field_i);
enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty });
}
}
@@ -1887,16 +1889,17 @@ fn zirUnionDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const union_obj = try new_decl_arena.allocator.create(Module.Union);
+ const union_obj = try new_decl_arena_allocator.create(Module.Union);
const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union";
- const union_payload = try new_decl_arena.allocator.create(Type.Payload.Union);
+ const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
union_payload.* = .{
.base = .{ .tag = type_tag },
.data = union_obj,
};
const union_ty = Type.initPayload(&union_payload.base);
- const union_val = try Value.Tag.ty.create(&new_decl_arena.allocator, union_ty);
+ const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -1955,15 +1958,16 @@ fn zirOpaqueDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const opaque_obj = try new_decl_arena.allocator.create(Module.Opaque);
- const opaque_ty_payload = try new_decl_arena.allocator.create(Type.Payload.Opaque);
+ const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque);
+ const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque);
opaque_ty_payload.* = .{
.base = .{ .tag = .@"opaque" },
.data = opaque_obj,
};
const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
- const opaque_val = try Value.Tag.ty.create(&new_decl_arena.allocator, opaque_ty);
+ const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
const type_name = try sema.createTypeName(block, small.name_strategy);
const new_decl = try mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -2008,10 +2012,11 @@ fn zirErrorSetDecl(
var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const error_set = try new_decl_arena.allocator.create(Module.ErrorSet);
- const error_set_ty = try Type.Tag.error_set.create(&new_decl_arena.allocator, error_set);
- const error_set_val = try Value.Tag.ty.create(&new_decl_arena.allocator, error_set_ty);
+ const error_set = try new_decl_arena_allocator.create(Module.ErrorSet);
+ const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set);
+ const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty);
const type_name = try sema.createTypeName(block, name_strategy);
const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{
.ty = Type.type,
@@ -2019,9 +2024,9 @@ fn zirErrorSetDecl(
}, type_name);
new_decl.owns_tv = true;
errdefer sema.mod.abortAnonDecl(new_decl);
- const names = try new_decl_arena.allocator.alloc([]const u8, fields.len);
+ const names = try new_decl_arena_allocator.alloc([]const u8, fields.len);
for (fields) |str_index, i| {
- names[i] = try new_decl_arena.allocator.dupe(u8, sema.code.nullTerminatedString(str_index));
+ names[i] = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index));
}
error_set.* = .{
.owner_decl = new_decl,
@@ -3935,7 +3940,7 @@ fn analyzeCall(
{
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
for (memoized_call_key.args) |*arg| {
arg.* = try arg.*.copy(arena);
@@ -4069,6 +4074,7 @@ fn analyzeCall(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
// Re-run the block that creates the function, with the comptime parameters
// pre-populated inside `inst_map`. This causes `param_comptime` and
@@ -4078,13 +4084,13 @@ fn analyzeCall(
.mod = mod,
.gpa = gpa,
.arena = sema.arena,
- .perm_arena = &new_decl_arena.allocator,
+ .perm_arena = new_decl_arena_allocator,
.code = fn_zir,
.owner_decl = new_decl,
.func = null,
.fn_ret_ty = Type.void,
.owner_func = null,
- .comptime_args = try new_decl_arena.allocator.alloc(TypedValue, uncasted_args.len),
+ .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len),
.comptime_args_fn_inst = module_fn.zir_body_inst,
.preallocated_new_func = new_module_func,
};
@@ -4168,7 +4174,7 @@ fn analyzeCall(
else => continue,
}
const arg = child_sema.inst_map.get(inst).?;
- const copied_arg_ty = try child_sema.typeOf(arg).copy(&new_decl_arena.allocator);
+ const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator);
if (child_sema.resolveMaybeUndefValAllowVariables(
&child_block,
.unneeded,
@@ -4176,7 +4182,7 @@ fn analyzeCall(
) catch unreachable) |arg_val| {
child_sema.comptime_args[arg_i] = .{
.ty = copied_arg_ty,
- .val = try arg_val.copy(&new_decl_arena.allocator),
+ .val = try arg_val.copy(new_decl_arena_allocator),
};
} else {
child_sema.comptime_args[arg_i] = .{
@@ -4191,8 +4197,8 @@ fn analyzeCall(
try wip_captures.finalize();
// Populate the Decl ty/val with the function and its type.
- new_decl.ty = try child_sema.typeOf(new_func_inst).copy(&new_decl_arena.allocator);
- new_decl.val = try Value.Tag.function.create(&new_decl_arena.allocator, new_func);
+ new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator);
+ new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func);
new_decl.analysis = .complete;
log.debug("generic function '{s}' instantiated with type {}", .{
@@ -6047,8 +6053,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
defer arena.deinit();
const target = sema.mod.getTarget();
- const min_int = try operand_ty.minInt(&arena.allocator, target);
- const max_int = try operand_ty.maxInt(&arena.allocator, target);
+ const min_int = try operand_ty.minInt(arena.getAllocator(), target);
+ const max_int = try operand_ty.maxInt(arena.getAllocator(), target);
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return sema.fail(
@@ -12795,7 +12801,7 @@ const ComptimePtrMutationKit = struct {
fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator {
self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
- return &self.decl_arena.allocator;
+ return self.decl_arena.getAllocator();
}
fn finishArena(self: *ComptimePtrMutationKit) void {
@@ -14287,6 +14293,7 @@ fn semaStructFields(
var decl_arena = decl.value_arena.?.promote(gpa);
defer decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.getAllocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
@@ -14294,8 +14301,8 @@ fn semaStructFields(
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &analysis_arena.allocator,
- .perm_arena = &decl_arena.allocator,
+ .arena = analysis_arena.getAllocator(),
+ .perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.func = null,
@@ -14304,7 +14311,7 @@ fn semaStructFields(
};
defer sema.deinit();
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Block = .{
@@ -14328,7 +14335,7 @@ fn semaStructFields(
try wip_captures.finalize();
- try struct_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len);
+ try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);
const bits_per_field = 4;
const fields_per_u32 = 32 / bits_per_field;
@@ -14359,7 +14366,7 @@ fn semaStructFields(
extra_index += 1;
// This string needs to outlive the ZIR code.
- const field_name = try decl_arena.allocator.dupe(u8, field_name_zir);
+ const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
const field_ty: Type = if (field_type_ref == .none)
Type.initTag(.noreturn)
else
@@ -14371,7 +14378,7 @@ fn semaStructFields(
const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
assert(!gop.found_existing);
gop.value_ptr.* = .{
- .ty = try field_ty.copy(&decl_arena.allocator),
+ .ty = try field_ty.copy(decl_arena_allocator),
.abi_align = Value.initTag(.abi_align_default),
.default_val = Value.initTag(.unreachable_value),
.is_comptime = is_comptime,
@@ -14385,7 +14392,7 @@ fn semaStructFields(
// that points to this alignment expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val;
- gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator);
+ gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator);
}
if (has_default) {
const default_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
@@ -14396,7 +14403,7 @@ fn semaStructFields(
// But only resolve the source location if we need to emit a compile error.
const default_val = (try sema.resolveMaybeUndefVal(&block_scope, src, default_inst)) orelse
return sema.failWithNeededComptime(&block_scope, src);
- gop.value_ptr.default_val = try default_val.copy(&decl_arena.allocator);
+ gop.value_ptr.default_val = try default_val.copy(decl_arena_allocator);
}
}
}
@@ -14454,6 +14461,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa);
defer union_obj.owner_decl.value_arena.?.* = decl_arena.state;
+ const decl_arena_allocator = decl_arena.getAllocator();
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
defer analysis_arena.deinit();
@@ -14461,8 +14469,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
- .arena = &analysis_arena.allocator,
- .perm_arena = &decl_arena.allocator,
+ .arena = analysis_arena.getAllocator(),
+ .perm_arena = decl_arena_allocator,
.code = zir,
.owner_decl = decl,
.func = null,
@@ -14471,7 +14479,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
};
defer sema.deinit();
- var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope);
+ var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope);
defer wip_captures.deinit();
var block_scope: Block = .{
@@ -14495,7 +14503,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
try wip_captures.finalize();
- try union_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len);
+ try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);
var int_tag_ty: Type = undefined;
var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
@@ -14571,7 +14579,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
}
// This string needs to outlive the ZIR code.
- const field_name = try decl_arena.allocator.dupe(u8, field_name_zir);
+ const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
if (enum_field_names) |set| {
set.putAssumeCapacity(field_name, {});
}
@@ -14589,7 +14597,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
assert(!gop.found_existing);
gop.value_ptr.* = .{
- .ty = try field_ty.copy(&decl_arena.allocator),
+ .ty = try field_ty.copy(decl_arena_allocator),
.abi_align = Value.initTag(.abi_align_default),
};
@@ -14598,7 +14606,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
// that points to this alignment expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val;
- gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator);
+ gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator);
} else {
gop.value_ptr.abi_align = Value.initTag(.abi_align_default);
}
@@ -14615,15 +14623,16 @@ fn generateUnionTagTypeNumbered(
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const enum_obj = try new_decl_arena.allocator.create(Module.EnumNumbered);
- const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumNumbered);
+ const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered);
+ const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered);
enum_ty_payload.* = .{
.base = .{ .tag = .enum_numbered },
.data = enum_obj,
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
- const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
// TODO better type name
const new_decl = try mod.createAnonymousDecl(block, .{
.ty = Type.type,
@@ -14640,8 +14649,8 @@ fn generateUnionTagTypeNumbered(
.node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
- try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
- try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ .ty = int_ty });
+ try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+ try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ .ty = int_ty });
try new_decl.finalizeNewArena(&new_decl_arena);
return enum_ty;
}
@@ -14651,15 +14660,16 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type
var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
errdefer new_decl_arena.deinit();
+ const new_decl_arena_allocator = new_decl_arena.getAllocator();
- const enum_obj = try new_decl_arena.allocator.create(Module.EnumSimple);
- const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumSimple);
+ const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple);
+ const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple);
enum_ty_payload.* = .{
.base = .{ .tag = .enum_simple },
.data = enum_obj,
};
const enum_ty = Type.initPayload(&enum_ty_payload.base);
- const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty);
+ const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
// TODO better type name
const new_decl = try mod.createAnonymousDecl(block, .{
.ty = Type.type,
@@ -14674,7 +14684,7 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type
.node_offset = 0,
};
// Here we pre-allocate the maps using the decl arena.
- try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len);
+ try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
try new_decl.finalizeNewArena(&new_decl_arena);
return enum_ty;
}
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 43776dea6723..142bf1a146fe 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -390,6 +390,7 @@ pub const DeclGen = struct {
// Fall back to generic implementation.
var arena = std.heap.ArenaAllocator.init(dg.module.gpa);
defer arena.deinit();
+ const arena_allocator = arena.getAllocator();
try writer.writeAll("{");
var index: usize = 0;
@@ -397,7 +398,7 @@ pub const DeclGen = struct {
const elem_ty = ty.elemType();
while (index < len) : (index += 1) {
if (index != 0) try writer.writeAll(",");
- const elem_val = try val.elemValue(&arena.allocator, index);
+ const elem_val = try val.elemValue(arena_allocator, index);
try dg.renderValue(writer, elem_ty, elem_val);
}
if (ty.sentinel()) |sentinel_val| {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index e326b2a677c3..31d3461846d3 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -331,7 +331,7 @@ pub const Object = struct {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const mod = comp.bin_file.options.module.?;
const cache_dir = mod.zig_cache_artifact_directory;
@@ -779,7 +779,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator);
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
const opaque_obj = t.castTag(.@"opaque").?.data;
const name = try opaque_obj.getFullyQualifiedName(gpa);
@@ -837,7 +837,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator);
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
const struct_obj = t.castTag(.@"struct").?.data;
@@ -871,7 +871,7 @@ pub const DeclGen = struct {
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
- gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator);
+ gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator());
const union_obj = t.cast(Type.Payload.Union).?.data;
const target = dg.module.getTarget();
@@ -2485,7 +2485,7 @@ pub const FuncGen = struct {
var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const llvm_params_len = args.len;
const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len);
diff --git a/src/crash_report.zig b/src/crash_report.zig
index f11f95fe0c8d..92c37d2ac815 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -85,7 +85,7 @@ fn dumpStatusReport() !void {
const anal = zir_state orelse return;
// Note: We have the panic mutex here, so we can safely use the global crash heap.
var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
- const allocator = &fba.allocator;
+ const allocator = fba.getAllocator();
const stderr = io.getStdErr().writer();
const block: *Sema.Block = anal.block;
diff --git a/src/glibc.zig b/src/glibc.zig
index e6e67e4f49f0..c3f2da599eff 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -65,7 +65,7 @@ pub fn loadMetaData(gpa: Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
var all_versions = std.ArrayListUnmanaged(std.builtin.Version){};
var all_functions = std.ArrayListUnmanaged(Fn){};
@@ -256,7 +256,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crti_o => {
@@ -711,7 +711,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const target = comp.getTarget();
const target_version = target.os.version_range.linux.glibc;
diff --git a/src/libcxx.zig b/src/libcxx.zig
index 9986c922ba97..908df3ca2592 100644
--- a/src/libcxx.zig
+++ b/src/libcxx.zig
@@ -89,7 +89,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "c++";
const output_mode = .Lib;
@@ -236,7 +236,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "c++abi";
const output_mode = .Lib;
diff --git a/src/libtsan.zig b/src/libtsan.zig
index 57f1f8c78ee7..47089cc7790f 100644
--- a/src/libtsan.zig
+++ b/src/libtsan.zig
@@ -15,7 +15,7 @@ pub fn buildTsan(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "tsan";
const output_mode = .Lib;
diff --git a/src/libunwind.zig b/src/libunwind.zig
index 50c329c6d6d3..dabd8631b9e4 100644
--- a/src/libunwind.zig
+++ b/src/libunwind.zig
@@ -17,7 +17,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const root_name = "unwind";
const output_mode = .Lib;
diff --git a/src/link.zig b/src/link.zig
index b57be64d4279..0b191ca8daf8 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -628,7 +628,7 @@ pub const File = struct {
var arena_allocator = std.heap.ArenaAllocator.init(base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/C.zig b/src/link/C.zig
index cbd36ebab504..6bdace3fcaf3 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -128,7 +128,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
- .typedefs_arena = &self.arena.allocator,
+ .typedefs_arena = self.arena.getAllocator(),
},
.code = code.toManaged(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
@@ -193,7 +193,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promote(module.gpa),
- .typedefs_arena = &self.arena.allocator,
+ .typedefs_arena = self.arena.getAllocator(),
},
.code = code.toManaged(module.gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index d5e3e6caa32f..00bddfe578b8 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -877,7 +877,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 6670f1a8b667..200ca488984d 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1243,7 +1243,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index bd26b64ad281..5e0e76648351 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -412,7 +412,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
@@ -5379,7 +5379,7 @@ fn snapshotState(self: *MachO) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const out_file = try emit.directory.handle.createFile("snapshots.json", .{
.truncate = self.cold_start,
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index c4c42940b85f..bc7e4d71a414 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -168,7 +168,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void {
try fn_map_res.value_ptr.functions.put(gpa, decl, out);
} else {
const file = decl.getFileScope();
- const arena = &self.path_arena.allocator;
+ const arena = self.path_arena.getAllocator();
// each file gets a symbol
fn_map_res.value_ptr.* = .{
.sym_index = blk: {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index a8606ac27f7c..7ffd067596d6 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -950,7 +950,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 84257de388e3..fe5ef2af9c9f 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -120,7 +120,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as []TbdV4", .{});
const inner = lib_stub.yaml.parse([]TbdV4) catch break :err;
- var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len);
+ var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, inner.len);
for (inner) |doc, i| {
out[i] = .{ .v4 = doc };
}
@@ -130,7 +130,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as TbdV4", .{});
const inner = lib_stub.yaml.parse(TbdV4) catch break :err;
- var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1);
+ var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1);
out[0] = .{ .v4 = inner };
break :blk out;
}
@@ -148,7 +148,7 @@ pub const LibStub = struct {
err: {
log.debug("trying to parse as TbdV3", .{});
const inner = lib_stub.yaml.parse(TbdV3) catch break :err;
- var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1);
+ var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1);
out[0] = .{ .v3 = inner };
break :blk out;
}
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 4392befb597a..261caee717f1 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -248,15 +248,16 @@ pub const Yaml = struct {
pub fn load(allocator: Allocator, source: []const u8) !Yaml {
var arena = ArenaAllocator.init(allocator);
+ const arena_allocator = arena.getAllocator();
- var tree = Tree.init(&arena.allocator);
+ var tree = Tree.init(arena_allocator);
try tree.parse(source);
- var docs = std.ArrayList(Value).init(&arena.allocator);
+ var docs = std.ArrayList(Value).init(arena_allocator);
try docs.ensureUnusedCapacity(tree.docs.items.len);
for (tree.docs.items) |node| {
- const value = try Value.fromNode(&arena.allocator, &tree, node, null);
+ const value = try Value.fromNode(arena_allocator, &tree, node, null);
docs.appendAssumeCapacity(value);
}
@@ -299,7 +300,7 @@ pub const Yaml = struct {
.Pointer => |info| {
switch (info.size) {
.Slice => {
- var parsed = try self.arena.allocator.alloc(info.child, self.docs.items.len);
+ var parsed = try self.arena.getAllocator().alloc(info.child, self.docs.items.len);
for (self.docs.items) |doc, i| {
parsed[i] = try self.parseValue(info.child, doc);
}
@@ -361,7 +362,7 @@ pub const Yaml = struct {
inline for (struct_info.fields) |field| {
const value: ?Value = map.get(field.name) orelse blk: {
- const field_name = try mem.replaceOwned(u8, &self.arena.allocator, field.name, "_", "-");
+ const field_name = try mem.replaceOwned(u8, self.arena.getAllocator(), field.name, "_", "-");
break :blk map.get(field_name);
};
@@ -382,7 +383,7 @@ pub const Yaml = struct {
fn parsePointer(self: *Yaml, comptime T: type, value: Value) Error!T {
const ptr_info = @typeInfo(T).Pointer;
- const arena = &self.arena.allocator;
+ const arena = self.arena.getAllocator();
switch (ptr_info.size) {
.Slice => {
diff --git a/src/main.zig b/src/main.zig
index 52272db8ef2c..c97415ff29e5 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -139,7 +139,7 @@ pub fn main() anyerror!void {
const gpa = gpa: {
if (!builtin.link_libc) {
gpa_need_deinit = true;
- break :gpa &general_purpose_allocator.allocator;
+ break :gpa general_purpose_allocator.getAllocator();
}
// We would prefer to use raw libc allocator here, but cannot
// use it if it won't support the alignment we need.
@@ -153,7 +153,7 @@ pub fn main() anyerror!void {
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
- const arena = &arena_instance.allocator;
+ const arena = arena_instance.getAllocator();
const args = try process.argsAlloc(arena);
@@ -3619,7 +3619,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
var errors = std.ArrayList(Compilation.AllErrors.Message).init(gpa);
defer errors.deinit();
- try Compilation.AllErrors.addZir(&arena_instance.allocator, &errors, &file);
+ try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (color) {
.auto => std.debug.detectTTYConfig(),
.on => .escape_codes,
@@ -3818,7 +3818,7 @@ fn fmtPathFile(
var errors = std.ArrayList(Compilation.AllErrors.Message).init(fmt.gpa);
defer errors.deinit();
- try Compilation.AllErrors.addZir(&arena_instance.allocator, &errors, &file);
+ try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file);
const ttyconf: std.debug.TTY.Config = switch (fmt.color) {
.auto => std.debug.detectTTYConfig(),
.on => .escape_codes,
diff --git a/src/mingw.zig b/src/mingw.zig
index b2628553b9bf..6f02ebf39525 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crt2_o => {
@@ -281,7 +281,7 @@ fn add_cc_args(
pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const def_file_path = findDef(comp, arena, lib_name) catch |err| switch (err) {
error.FileNotFound => {
diff --git a/src/musl.zig b/src/musl.zig
index b9d00c4b124f..7c3957fdd744 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crti_o => {
diff --git a/src/print_air.zig b/src/print_air.zig
index 86fc6a63962d..ce53a26aeb78 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -47,7 +47,7 @@ pub fn dump(gpa: Allocator, air: Air, zir: Zir, liveness: Liveness) void {
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.air = air,
.zir = zir,
.liveness = liveness,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 9532b33ccd63..996898b4ace2 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -19,7 +19,7 @@ pub fn renderAsTextToFile(
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = 0,
@@ -74,7 +74,7 @@ pub fn renderInstructionContext(
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = if (indent < 2) 2 else indent,
@@ -106,7 +106,7 @@ pub fn renderSingleInstruction(
var writer: Writer = .{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena.getAllocator(),
.file = scope_file,
.code = scope_file.zir,
.indent = indent,
diff --git a/src/stage1.zig b/src/stage1.zig
index 942be668892d..810dcc477b93 100644
--- a/src/stage1.zig
+++ b/src/stage1.zig
@@ -38,7 +38,7 @@ pub fn main(argc: c_int, argv: [*][*:0]u8) callconv(.C) c_int {
const gpa = std.heap.c_allocator;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
- const arena = &arena_instance.allocator;
+ const arena = arena_instance.getAllocator();
const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{s}", .{"OutOfMemory"});
for (args) |*arg, i| {
diff --git a/src/test.zig b/src/test.zig
index a9c1905b3636..74147069e80f 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -692,7 +692,7 @@ pub const TestContext = struct {
var arena_allocator = std.heap.ArenaAllocator.init(allocator);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 109535d0810f..570059255004 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -373,13 +373,14 @@ pub fn translate(
// from this function.
var arena = std.heap.ArenaAllocator.init(gpa);
errdefer arena.deinit();
+ const arena_allocator = arena.getAllocator();
var context = Context{
.gpa = gpa,
- .arena = &arena.allocator,
+ .arena = arena_allocator,
.source_manager = ast_unit.getSourceManager(),
.alias_list = AliasList.init(gpa),
- .global_scope = try arena.allocator.create(Scope.Root),
+ .global_scope = try arena_allocator.create(Scope.Root),
.clang_context = ast_unit.getASTContext(),
.pattern_list = try PatternList.init(gpa),
};
diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig
index 18906cb6c710..b2235ad53ebe 100644
--- a/src/wasi_libc.zig
+++ b/src/wasi_libc.zig
@@ -67,7 +67,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
switch (crt_file) {
.crt1_reactor_o => {
diff --git a/test/cli.zig b/test/cli.zig
index 3f50ebe4039f..20a2143f51c3 100644
--- a/test/cli.zig
+++ b/test/cli.zig
@@ -16,7 +16,7 @@ pub fn main() !void {
// skip my own exe name
_ = arg_it.skip();
- a = &arena.allocator;
+ a = arena.getAllocator();
const zig_exe_rel = try (arg_it.next(a) orelse {
std.debug.print("Expected first argument to be path to zig compiler\n", .{});
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 68d8f2a80730..46cbdd77f6ee 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -491,7 +491,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() !void {
\\ var allocator_buf: [10]u8 = undefined;
\\ var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
- \\ const allocator = &std.heap.loggingAllocator(&fixedBufferAllocator.allocator).allocator;
+ \\ const allocator = std.heap.loggingAllocator(fixedBufferAllocator.getAllocator()).getAllocator();
\\
\\ var a = try allocator.alloc(u8, 10);
\\ a = allocator.shrink(a, 5);
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 09a165304cc3..3ed47432754b 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -6550,9 +6550,9 @@ pub fn addCases(ctx: *TestContext) !void {
ctx.objErrStage1("method call with first arg type wrong container",
\\pub const List = struct {
\\ len: usize,
- \\ allocator: *Allocator,
+ \\ allocator: Allocator,
\\
- \\ pub fn init(allocator: *Allocator) List {
+ \\ pub fn init(allocator: Allocator) List {
\\ return List {
\\ .len = 0,
\\ .allocator = allocator,
@@ -6573,7 +6573,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ x.init();
\\}
, &[_][]const u8{
- "tmp.zig:23:5: error: expected type '*Allocator', found '*List'",
+ "tmp.zig:23:5: error: expected type 'Allocator', found '*List'",
});
ctx.objErrStage1("binary not on number literal",
@@ -7569,7 +7569,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\
\\export fn entry() void {
\\ const a = MdNode.Header {
- \\ .text = MdText.init(&std.testing.allocator),
+ \\ .text = MdText.init(std.testing.allocator),
\\ .weight = HeaderWeight.H1,
\\ };
\\ _ = a;
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index 35fe6e5c6a08..f0dea39ccbb0 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -16,7 +16,7 @@ const Token = union(enum) {
};
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
-var global_allocator = &gpa.allocator;
+const global_allocator = gpa.getAllocator();
fn tokenize(input: []const u8) !ArrayList(Token) {
const State = enum {
diff --git a/test/standalone/cat/main.zig b/test/standalone/cat/main.zig
index 80ec97877a0d..a8b16a05ca48 100644
--- a/test/standalone/cat/main.zig
+++ b/test/standalone/cat/main.zig
@@ -8,7 +8,7 @@ const warn = std.log.warn;
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
- const arena = &arena_instance.allocator;
+ const arena = arena_instance.getAllocator();
const args = try process.argsAlloc(arena);
diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig
index 0480866867b6..e4ad6927b2c5 100644
--- a/tools/gen_spirv_spec.zig
+++ b/tools/gen_spirv_spec.zig
@@ -4,7 +4,7 @@ const g = @import("spirv/grammar.zig");
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
if (args.len != 2) {
diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig
index f845c58b568d..a99d14752f31 100644
--- a/tools/gen_stubs.zig
+++ b/tools/gen_stubs.zig
@@ -25,7 +25,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const ally = &arena.allocator;
+ const ally = arena.getAllocator();
var symbols = std.ArrayList(Symbol).init(ally);
var sections = std.ArrayList([]const u8).init(ally);
diff --git a/tools/merge_anal_dumps.zig b/tools/merge_anal_dumps.zig
index 648a76ebfb63..93e067460557 100644
--- a/tools/merge_anal_dumps.zig
+++ b/tools/merge_anal_dumps.zig
@@ -9,7 +9,7 @@ pub fn main() anyerror!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
diff --git a/tools/process_headers.zig b/tools/process_headers.zig
index fa5fdb0042ef..1a90f99343eb 100644
--- a/tools/process_headers.zig
+++ b/tools/process_headers.zig
@@ -284,7 +284,7 @@ const LibCVendor = enum {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
var search_paths = std.ArrayList([]const u8).init(allocator);
var opt_out_dir: ?[]const u8 = null;
diff --git a/tools/update-license-headers.zig b/tools/update-license-headers.zig
index 4cc60ca4ea8f..83c47f1481fd 100644
--- a/tools/update-license-headers.zig
+++ b/tools/update-license-headers.zig
@@ -10,7 +10,7 @@ pub fn main() !void {
defer root_node.end();
var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const arena = &arena_allocator.allocator;
+ const arena = arena_allocator.getAllocator();
const args = try std.process.argsAlloc(arena);
const path_to_walk = args[1];
diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig
index c999db289991..90a96e057294 100644
--- a/tools/update_clang_options.zig
+++ b/tools/update_clang_options.zig
@@ -450,8 +450,8 @@ const cpu_targets = struct {
pub fn main() anyerror!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
if (args.len <= 1) {
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index 2eccb0ee1b4c..70bc5a1c74ee 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -769,7 +769,7 @@ const llvm_targets = [_]LlvmTarget{
pub fn main() anyerror!void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_state.deinit();
- const arena = &arena_state.allocator;
+ const arena = arena_state.getAllocator();
const args = try std.process.argsAlloc(arena);
if (args.len <= 1) {
@@ -845,7 +845,7 @@ fn processOneTarget(job: Job) anyerror!void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_state.deinit();
- const arena = &arena_state.allocator;
+ const arena = arena_state.getAllocator();
var progress_node = job.root_progress.start(llvm_target.zig_name, 3);
progress_node.activate();
diff --git a/tools/update_glibc.zig b/tools/update_glibc.zig
index 6232a2e8f0c7..7cccb47e1cb7 100644
--- a/tools/update_glibc.zig
+++ b/tools/update_glibc.zig
@@ -133,7 +133,7 @@ const Function = struct {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
const in_glibc_dir = args[1]; // path to the unzipped tarball of glibc, e.g. ~/downloads/glibc-2.25
const zig_src_dir = args[2]; // path to the source checkout of zig, lib dir, e.g. ~/zig-src/lib
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 756d311ecc73..0c6c570a317c 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -48,7 +48,7 @@ const Version = struct {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.getAllocator();
const args = try std.process.argsAlloc(allocator);
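
The hunks above all apply the same mechanical rewrite: the Allocator interface stops being an embedded struct field reached through &instance.allocator and becomes a small value (data pointer plus vtable pointer) returned by an accessor and passed by value. A minimal before/after sketch of the caller-side change, assuming only the getAllocator() accessor used in this patch (a later patch in the series renames it):

    const std = @import("std");

    pub fn main() !void {
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();

        // Before allocgate the interface was an embedded field:
        //     const allocator = &arena.allocator; // *std.mem.Allocator
        // After this patch the accessor returns a value type:
        const allocator = arena.getAllocator(); // std.mem.Allocator

        const args = try std.process.argsAlloc(allocator);
        defer std.process.argsFree(allocator, args);
    }
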
From 1093b09a989edb8553e79b061bb15c5745f5d193 Mon Sep 17 00:00:00 2001
From: Lee Cannon
Zig programmers can implement their own allocators by fulfilling the Allocator interface.
In order to do this one must read carefully the documentation comments in std/mem.zig and
- then supply a {#syntax#}reallocFn{#endsyntax#} and a {#syntax#}shrinkFn{#endsyntax#}.
+ then supply an {#syntax#}allocFn{#endsyntax#} and a {#syntax#}resizeFn{#endsyntax#}.
There are many example allocators to look at for inspiration. Look at std/heap.zig and
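The exact callback signatures live in std/mem.zig, as the paragraph says, and implementing them is beyond a quick sketch. Purely as a consumer-side illustration of the reworked interface, here is a hedged example using `std.heap.FixedBufferAllocator` (one of the std/heap.zig allocators the text points to, with the `getAllocator()` accessor name used throughout this series):

    const std = @import("std");

    test "FixedBufferAllocator through the new interface" {
        var buffer: [128]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buffer);
        // The interface is obtained as a value, not via `&fba.allocator`.
        const allocator = fba.getAllocator();

        const slice = try allocator.alloc(u8, 16);
        try std.testing.expect(slice.len == 16);
    }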
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 2e9eab0fd209..fa2536cfaa2f 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -555,7 +555,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
// Do memory limit accounting with requested sizes rather than what backing_allocator returns
// because if we want to return error.OutOfMemory, we have to leave allocation untouched, and
- // that is impossible to guarantee after calling backing_allocator.resizeFn.
+ // that is impossible to guarantee after calling backing_allocator.vtable.resize.
const prev_req_bytes = self.total_requested_bytes;
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
@@ -571,7 +571,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const result_len = if (config.never_unmap and new_size == 0)
0
else
- try self.backing_allocator.resizeFn(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
+ try self.backing_allocator.vtable.resize(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
@@ -764,7 +764,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const new_aligned_size = math.max(len, ptr_align);
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
- const slice = try self.backing_allocator.allocFn(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
+ const slice = try self.backing_allocator.vtable.alloc(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
if (config.retain_metadata and !config.never_unmap) {
@@ -1191,10 +1191,12 @@ test "double frees" {
test "bug 9995 fix, large allocs count requested size not backing size" {
// with AtLeast, buffer likely to be larger than requested, especially when shrinking
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
- var buf = try gpa.allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
+ const allocator = gpa.allocator();
+
+ var buf = try allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
- buf = try gpa.allocator.reallocAtLeast(buf, 1);
+ buf = try allocator.reallocAtLeast(buf, 1);
try std.testing.expect(gpa.total_requested_bytes == 1);
- buf = try gpa.allocator.reallocAtLeast(buf, 2);
+ buf = try allocator.reallocAtLeast(buf, 2);
try std.testing.expect(gpa.total_requested_bytes == 2);
}
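A standalone sketch of the behavior this test pins down, assuming only the `gpa.allocator()` accessor and the `total_requested_bytes` counter used above: with `enable_memory_limit` set, accounting tracks the sizes callers requested, not whatever the backing allocator actually handed back.

    const std = @import("std");

    test "memory limit accounting uses requested sizes" {
        var gpa = std.heap.GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
        const allocator = gpa.allocator();

        // The backing allocator may round this allocation up (e.g. to a
        // page), but the counter reflects the 100 bytes requested.
        const buf = try allocator.alloc(u8, 100);
        try std.testing.expect(gpa.total_requested_bytes == 100);
        allocator.free(buf);
    }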
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index 9e5957c9366b..df3974f795ec 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -108,7 +108,7 @@ pub fn NoResize(comptime AllocatorType: type) type {
/// When the size/alignment is less than or equal to the previous allocation,
/// this function returns `error.OutOfMemory` when the allocator decides the client
/// would be better off keeping the extra alignment/size. Clients will call
-/// `resizeFn` when they require the allocator to track a new alignment/size,
+/// `vtable.resize` when they require the allocator to track a new alignment/size,
/// and so this function should only return success when the allocator considers
/// the reallocation desirable from the allocator's perspective.
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
@@ -124,7 +124,7 @@ pub fn NoResize(comptime AllocatorType: type) type {
fn reallocBytes(
self: Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
- /// `allocFn` or `resizeFn`.
+ /// `vtable.alloc` or `vtable.resize`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
/// is guaranteed to be >= 1.
old_mem: []u8,
@@ -507,7 +507,7 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
return new_buf[0..m.len :0];
}
-/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning
+/// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning
/// error.OutOfMemory should be impossible.
/// This function allows a runtime `buf_align` value. Callers should generally prefer
/// to call `shrink` directly.
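A small sketch of the caller-facing guarantee described above, assuming the `shrink` entry point the comment refers to keeps its existing shape (slice in, shorter slice out, no error union) after the rename of the underlying field to `vtable.resize`:

    const std = @import("std");

    test "shrink cannot fail" {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        const allocator = gpa.allocator();

        var buf = try allocator.alloc(u8, 32);
        // new_len <= buf.len, so error.OutOfMemory is impossible and
        // `shrink` returns the shortened slice directly.
        buf = allocator.shrink(buf, 8);
        try std.testing.expect(buf.len == 8);
        allocator.free(buf);
    }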
From 23866b1f81010277b204d6f3f5db23d020a76400 Mon Sep 17 00:00:00 2001
From: Lee Cannon