From 85de022c5671d777f62ddff254a814dab05242fc Mon Sep 17 00:00:00 2001
From: Lee Cannon
Date: Fri, 29 Oct 2021 00:37:25 +0100
Subject: [PATCH 01/10] allocgate: std Allocator interface refactor

---
 ci/srht/update-download-page.zig | 2 +-
 doc/docgen.zig | 20 +-
 lib/std/Thread.zig | 2 +-
 lib/std/array_hash_map.zig | 66 +++---
 lib/std/array_list.zig | 56 ++---
 lib/std/ascii.zig | 4 +-
 lib/std/atomic/queue.zig | 6 +-
 lib/std/atomic/stack.zig | 6 +-
 lib/std/bit_set.zig | 18 +-
 lib/std/buf_map.zig | 2 +-
 lib/std/buf_set.zig | 4 +-
 lib/std/build.zig | 18 +-
 lib/std/build/InstallRawStep.zig | 4 +-
 lib/std/build/OptionsStep.zig | 2 +-
 lib/std/builtin.zig | 2 +-
 lib/std/child_process.zig | 16 +-
 lib/std/coff.zig | 6 +-
 lib/std/compress/gzip.zig | 6 +-
 lib/std/compress/zlib.zig | 6 +-
 lib/std/crypto/argon2.zig | 14 +-
 lib/std/crypto/bcrypt.zig | 4 +-
 lib/std/crypto/benchmark.zig | 2 +-
 lib/std/crypto/scrypt.zig | 16 +-
 lib/std/cstr.zig | 6 +-
 lib/std/debug.zig | 29 +--
 lib/std/dwarf.zig | 16 +-
 lib/std/event/group.zig | 6 +-
 lib/std/event/loop.zig | 4 +-
 lib/std/event/rwlock.zig | 2 +-
 lib/std/fifo.zig | 4 +-
 lib/std/fmt.zig | 4 +-
 lib/std/fs.zig | 16 +-
 lib/std/fs/file.zig | 4 +-
 lib/std/fs/get_app_data_dir.zig | 2 +-
 lib/std/fs/path.zig | 18 +-
 lib/std/fs/test.zig | 50 ++--
 lib/std/fs/wasi.zig | 2 +-
 lib/std/fs/watch.zig | 6 +-
 lib/std/hash/auto_hash.zig | 2 +-
 lib/std/hash/benchmark.zig | 2 +-
 lib/std/hash_map.zig | 62 ++---
 lib/std/heap.zig | 262 +++++++++------------
 lib/std/heap/arena_allocator.zig | 24 +-
 lib/std/heap/general_purpose_allocator.zig | 66 +++---
 lib/std/heap/log_to_writer_allocator.zig | 27 +--
 lib/std/heap/logging_allocator.zig | 22 +-
 lib/std/io/buffered_atomic_file.zig | 4 +-
 lib/std/io/peek_stream.zig | 2 +-
 lib/std/io/reader.zig | 6 +-
 lib/std/json.zig | 30 +--
 lib/std/json/write_stream.zig | 4 +-
 lib/std/math/big/int.zig | 34 +--
 lib/std/math/big/rational.zig | 2 +-
 lib/std/mem.zig | 55 +++--
 lib/std/mem/Allocator.zig | 131 +++++++----
 lib/std/multi_array_list.zig | 20 +-
 lib/std/net.zig | 10 +-
 lib/std/net/test.zig | 2 +-
 lib/std/os/test.zig | 19 +-
 lib/std/pdb.zig | 8 +-
 lib/std/priority_dequeue.zig | 8 +-
 lib/std/priority_queue.zig | 6 +-
 lib/std/process.zig | 40 ++--
 lib/std/special/build_runner.zig | 2 +-
 lib/std/special/test_runner.zig | 2 +-
 lib/std/target.zig | 6 +-
 lib/std/testing.zig | 6 +-
 lib/std/testing/failing_allocator.zig | 23 +-
 lib/std/unicode.zig | 6 +-
 lib/std/wasm.zig | 2 +-
 lib/std/zig.zig | 2 +-
 lib/std/zig/Ast.zig | 4 +-
 lib/std/zig/CrossTarget.zig | 8 +-
 lib/std/zig/parse.zig | 4 +-
 lib/std/zig/parser_test.zig | 17 +-
 lib/std/zig/perf_test.zig | 2 +-
 lib/std/zig/render.zig | 48 ++--
 lib/std/zig/string_literal.zig | 4 +-
 lib/std/zig/system.zig | 6 +-
 lib/std/zig/system/darwin.zig | 6 +-
 src/Air.zig | 2 +-
 src/AstGen.zig | 18 +-
 src/Cache.zig | 4 +-
 src/Compilation.zig | 44 ++--
 src/Liveness.zig | 6 +-
 src/Module.zig | 66 +++---
 src/Package.zig | 12 +-
 src/RangeSet.zig | 2 +-
 src/Sema.zig | 10 +-
 src/ThreadPool.zig | 4 +-
 src/TypedValue.zig | 4 +-
 src/Zir.zig | 2 +-
 src/arch/aarch64/CodeGen.zig | 4 +-
 src/arch/aarch64/Mir.zig | 2 +-
 src/arch/arm/CodeGen.zig | 4 +-
 src/arch/arm/Mir.zig | 2 +-
 src/arch/riscv64/CodeGen.zig | 4 +-
 src/arch/riscv64/Mir.zig | 2 +-
 src/arch/wasm/CodeGen.zig | 4 +-
 src/arch/wasm/Mir.zig | 2 +-
 src/arch/x86_64/CodeGen.zig | 4 +-
 src/arch/x86_64/Mir.zig | 2 +-
 src/codegen/c.zig | 4 +-
 src/codegen/llvm.zig | 16 +-
 src/codegen/spirv.zig | 4 +-
 src/glibc.zig | 18 +-
 src/introspect.zig | 6 +-
 src/libc_installation.zig | 8 +-
 src/link.zig | 4 +-
 src/link/C.zig | 6 +-
 src/link/Coff.zig | 6 +-
 src/link/Elf.zig | 8 +-
 src/link/MachO.zig | 10 +-
 src/link/MachO/Archive.zig | 10 +-
 src/link/MachO/Atom.zig | 4 +-
 src/link/MachO/CodeSignature.zig | 4 +-
 src/link/MachO/DebugSymbols.zig | 18 +-
 src/link/MachO/Dylib.zig | 32 +--
 src/link/MachO/Object.zig | 22 +-
 src/link/MachO/Trie.zig | 18 +-
 src/link/MachO/commands.zig | 16 +-
 src/link/Plan9.zig | 4 +-
 src/link/SpirV.zig | 4 +-
 src/link/Wasm.zig | 4 +-
 src/link/Wasm/Atom.zig | 2 +-
 src/link/tapi.zig | 2 +-
 src/link/tapi/parse.zig | 14 +-
 src/link/tapi/yaml.zig | 4 +-
 src/main.zig | 62 ++---
 src/mingw.zig | 4 +-
 src/musl.zig | 6 +-
 src/print_air.zig | 6 +-
 src/print_env.zig | 2 +-
 src/print_targets.zig | 2 +-
 src/print_zir.zig | 10 +-
 src/register_manager.zig | 2 +-
 src/test.zig | 2 +-
 src/tracy.zig | 10 +-
 src/translate_c.zig | 24 +-
 src/translate_c/ast.zig | 6 +-
 src/type.zig | 30 +--
 src/value.zig | 92 ++++----
 src/wasi_libc.zig | 8 +-
 test/behavior/async_fn.zig | 6 +-
 test/cli.zig | 2 +-
 tools/merge_anal_dumps.zig | 4 +-
 tools/update_cpu_features.zig | 6 +-
 tools/update_spirv_features.zig | 4 +-
 148 files changed, 1092 insertions(+), 1095 deletions(-)

diff --git a/ci/srht/update-download-page.zig b/ci/srht/update-download-page.zig
index b91dfd0c376c..175cf0abaf66 100644
--- a/ci/srht/update-download-page.zig
+++ b/ci/srht/update-download-page.zig
@@ -18,7 +18,7 @@ pub fn main() !void {
 }

 fn render(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     in_file: []const u8,
     out_file: []const u8,
     fmt: enum {
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 1b4058ae4c9a..ed469caf9ee7 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -342,7 +342,7 @@ const Action = enum {
     Close,
 };

-fn genToc(allocator: *Allocator, tokenizer: *Tokenizer) !Toc {
+fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
     var urls = std.StringHashMap(Token).init(allocator);
     errdefer urls.deinit();

@@ -708,7 +708,7 @@ fn genToc(allocator: *Allocator, tokenizer: *Tokenizer) !Toc {
     };
 }

-fn urlize(allocator: *Allocator, input: []const u8) ![]u8 {
+fn urlize(allocator: Allocator, input: []const u8) ![]u8 {
     var buf = std.ArrayList(u8).init(allocator);
     defer buf.deinit();

@@ -727,7 +727,7 @@ fn urlize(allocator: *Allocator, input: []const u8) ![]u8 {
     return buf.toOwnedSlice();
 }

-fn escapeHtml(allocator: *Allocator, input: []const u8) ![]u8 {
+fn escapeHtml(allocator: Allocator, input: []const u8) ![]u8 {
     var buf = std.ArrayList(u8).init(allocator);
     defer buf.deinit();

@@ -773,7 +773,7 @@ test "term color" {
     try testing.expectEqualSlices(u8, "AgreenB", result);
 }

-fn termColor(allocator: *Allocator, input: []const u8) ![]u8 {
+fn termColor(allocator: Allocator, input: []const u8) ![]u8 {
     var buf = std.ArrayList(u8).init(allocator);
     defer buf.deinit();

@@ -883,7 +883,7 @@ fn writeEscapedLines(out: anytype, text: []const u8) !void {
 }

 fn tokenizeAndPrintRaw(
-    allocator: *Allocator,
+    allocator: Allocator,
     docgen_tokenizer: *Tokenizer,
     out: anytype,
     source_token: Token,
@@ -1137,7 +1137,7 @@ fn tokenizeAndPrintRaw(
 }

 fn tokenizeAndPrint(
-    allocator: *Allocator,
+    allocator: Allocator,
     docgen_tokenizer: *Tokenizer,
     out: anytype,
     source_token: Token,
@@ -1146,7 +1146,7 @@ fn tokenizeAndPrint(
     return tokenizeAndPrintRaw(allocator, docgen_tokenizer, out, source_token, raw_src);
 }

-fn printSourceBlock(allocator: *Allocator, docgen_tokenizer: *Tokenizer, out: anytype, syntax_block: SyntaxBlock) !void {
+fn printSourceBlock(allocator: Allocator, docgen_tokenizer: *Tokenizer, out: anytype, syntax_block: SyntaxBlock) !void {
     const source_type = @tagName(syntax_block.source_type);

     try out.print("<figure><figcaption class=\"{s}-cap\"><cite class=\"file\">{s}</cite></figcaption>", .{ source_type, syntax_block.name });
@@ -1188,7 +1188,7 @@ fn printShell(out: anytype, shell_content: []const u8) !void {
 }
 
 fn genHtml(
-    allocator: *Allocator,
+    allocator: Allocator,
     tokenizer: *Tokenizer,
     toc: *Toc,
     out: anytype,
@@ -1687,7 +1687,7 @@ fn genHtml(
     }
 }
 
-fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
+fn exec(allocator: Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
     const result = try ChildProcess.exec(.{
         .allocator = allocator,
         .argv = args,
@@ -1711,7 +1711,7 @@ fn exec(allocator: *Allocator, env_map: *std.BufMap, args: []const []const u8) !
     return result;
 }
 
-fn getBuiltinCode(allocator: *Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
+fn getBuiltinCode(allocator: Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
     const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "build-obj", "--show-builtin" });
     return result.stdout;
 }
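
Taken together, the hunks above show the crux of allocgate: std.mem.Allocator is
now passed and stored by value instead of as *Allocator. A minimal sketch of code
written against the new interface; the joinWords helper and its test are
illustrative, not part of the patch:

    const std = @import("std");

    // Under the new interface, the allocator parameter is a value type.
    fn joinWords(allocator: std.mem.Allocator, a: []const u8, b: []const u8) ![]u8 {
        return std.mem.concat(allocator, u8, &[_][]const u8{ a, " ", b });
    }

    test "value-type allocator parameter" {
        const s = try joinWords(std.testing.allocator, "hello", "world");
        defer std.testing.allocator.free(s);
        try std.testing.expectEqualStrings("hello world", s);
    }
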
diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig
index 1eafe28be2be..61f19c20d083 100644
--- a/lib/std/Thread.zig
+++ b/lib/std/Thread.zig
@@ -460,7 +460,7 @@ const WindowsThreadImpl = struct {
         errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0);
 
         const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes];
-        const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator.create(Instance) catch unreachable;
+        const instance = std.heap.FixedBufferAllocator.init(instance_bytes).getAllocator().create(Instance) catch unreachable;
         instance.* = .{
             .fn_args = args,
             .thread = .{
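
The Thread.zig hunk above shows the accessor side of the refactor: instead of
taking the address of an embedded allocator field, callers obtain the value-type
interface from an accessor. A small sketch of that pattern, using this patch's
getAllocator() naming:

    const std = @import("std");

    test "FixedBufferAllocator via getAllocator" {
        var buf: [256]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buf);
        const allocator = fba.getAllocator();

        const bytes = try allocator.alloc(u8, 16);
        defer allocator.free(bytes);
        try std.testing.expect(bytes.len == 16);
    }
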
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index e787abf1efb7..7ebafc0a1b4b 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -79,7 +79,7 @@ pub fn ArrayHashMap(
     comptime std.hash_map.verifyContext(Context, K, K, u32);
     return struct {
         unmanaged: Unmanaged,
-        allocator: *Allocator,
+        allocator: Allocator,
         ctx: Context,
 
         /// The ArrayHashMapUnmanaged type using the same settings as this managed map.
@@ -118,12 +118,12 @@ pub fn ArrayHashMap(
         const Self = @This();
 
         /// Create an ArrayHashMap instance which will use a specified allocator.
-        pub fn init(allocator: *Allocator) Self {
+        pub fn init(allocator: Allocator) Self {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call initContext instead.");
             return initContext(allocator, undefined);
         }
-        pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+        pub fn initContext(allocator: Allocator, ctx: Context) Self {
             return .{
                 .unmanaged = .{},
                 .allocator = allocator,
@@ -383,7 +383,7 @@ pub fn ArrayHashMap(
         /// Create a copy of the hash map which can be modified separately.
         /// The copy uses the same context as this instance, but the specified
         /// allocator.
-        pub fn cloneWithAllocator(self: Self, allocator: *Allocator) !Self {
+        pub fn cloneWithAllocator(self: Self, allocator: Allocator) !Self {
             var other = try self.unmanaged.cloneContext(allocator, self.ctx);
             return other.promoteContext(allocator, self.ctx);
         }
@@ -396,7 +396,7 @@ pub fn ArrayHashMap(
         }
         /// Create a copy of the hash map which can be modified separately.
         /// The copy uses the specified allocator and context.
-        pub fn cloneWithAllocatorAndContext(self: Self, allocator: *Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
+        pub fn cloneWithAllocatorAndContext(self: Self, allocator: Allocator, ctx: anytype) !ArrayHashMap(K, V, @TypeOf(ctx), store_hash) {
             var other = try self.unmanaged.cloneContext(allocator, ctx);
             return other.promoteContext(allocator, ctx);
         }
@@ -533,12 +533,12 @@ pub fn ArrayHashMapUnmanaged(
 
         /// Convert from an unmanaged map to a managed map.  After calling this,
         /// the promoted map should no longer be used.
-        pub fn promote(self: Self, allocator: *Allocator) Managed {
+        pub fn promote(self: Self, allocator: Allocator) Managed {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
             return self.promoteContext(allocator, undefined);
         }
-        pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+        pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
             return .{
                 .unmanaged = self,
                 .allocator = allocator,
@@ -549,7 +549,7 @@ pub fn ArrayHashMapUnmanaged(
         /// Frees the backing allocation and leaves the map in an undefined state.
         /// Note that this does not free keys or values.  You must take care of that
         /// before calling this function, if it is needed.
-        pub fn deinit(self: *Self, allocator: *Allocator) void {
+        pub fn deinit(self: *Self, allocator: Allocator) void {
             self.entries.deinit(allocator);
             if (self.index_header) |header| {
                 header.free(allocator);
@@ -570,7 +570,7 @@ pub fn ArrayHashMapUnmanaged(
         }
 
         /// Clears the map and releases the backing allocation
-        pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+        pub fn clearAndFree(self: *Self, allocator: Allocator) void {
             self.entries.shrinkAndFree(allocator, 0);
             if (self.index_header) |header| {
                 header.free(allocator);
@@ -633,24 +633,24 @@ pub fn ArrayHashMapUnmanaged(
         /// Otherwise, puts a new item with undefined value, and
         /// the `Entry` pointer points to it. Caller should then initialize
         /// the value (but not the key).
-        pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+        pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
             return self.getOrPutContext(allocator, key, undefined);
         }
-        pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+        pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
             const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
             if (!gop.found_existing) {
                 gop.key_ptr.* = key;
             }
             return gop;
         }
-        pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+        pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
             return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
         }
-        pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+        pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
             self.ensureTotalCapacityContext(allocator, self.entries.len + 1, ctx) catch |err| {
                 // "If key exists this function cannot fail."
                 const index = self.getIndexAdapted(key, key_ctx) orelse return err;
@@ -731,12 +731,12 @@ pub fn ArrayHashMapUnmanaged(
             }
         }
 
-        pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !GetOrPutResult {
+        pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !GetOrPutResult {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
             return self.getOrPutValueContext(allocator, key, value, undefined);
         }
-        pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
+        pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !GetOrPutResult {
             const res = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
             if (!res.found_existing) {
                 res.key_ptr.* = key;
@@ -749,12 +749,12 @@ pub fn ArrayHashMapUnmanaged(
 
         /// Increases capacity, guaranteeing that insertions up until the
         /// `expected_count` will not cause an allocation, and therefore cannot fail.
-        pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+        pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
             if (@sizeOf(ByIndexContext) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
             return self.ensureTotalCapacityContext(allocator, new_capacity, undefined);
         }
-        pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_capacity: usize, ctx: Context) !void {
+        pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_capacity: usize, ctx: Context) !void {
             if (new_capacity <= linear_scan_max) {
                 try self.entries.ensureTotalCapacity(allocator, new_capacity);
                 return;
@@ -781,7 +781,7 @@ pub fn ArrayHashMapUnmanaged(
         /// therefore cannot fail.
         pub fn ensureUnusedCapacity(
             self: *Self,
-            allocator: *Allocator,
+            allocator: Allocator,
             additional_capacity: usize,
         ) !void {
             if (@sizeOf(ByIndexContext) != 0)
@@ -790,7 +790,7 @@ pub fn ArrayHashMapUnmanaged(
         }
         pub fn ensureUnusedCapacityContext(
             self: *Self,
-            allocator: *Allocator,
+            allocator: Allocator,
             additional_capacity: usize,
             ctx: Context,
         ) !void {
@@ -808,24 +808,24 @@ pub fn ArrayHashMapUnmanaged(
 
         /// Clobbers any existing data. To detect if a put would clobber
         /// existing data, see `getOrPut`.
-        pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+        pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
             return self.putContext(allocator, key, value, undefined);
         }
-        pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+        pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
             const result = try self.getOrPutContext(allocator, key, ctx);
             result.value_ptr.* = value;
         }
 
         /// Inserts a key-value pair into the hash map, asserting that no previous
         /// entry with the same key is already present
-        pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+        pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
             return self.putNoClobberContext(allocator, key, value, undefined);
         }
-        pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+        pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
             const result = try self.getOrPutContext(allocator, key, ctx);
             assert(!result.found_existing);
             result.value_ptr.* = value;
@@ -859,12 +859,12 @@ pub fn ArrayHashMapUnmanaged(
         }
 
         /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
-        pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+        pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
             return self.fetchPutContext(allocator, key, value, undefined);
         }
-        pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+        pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
             const gop = try self.getOrPutContext(allocator, key, ctx);
             var result: ?KV = null;
             if (gop.found_existing) {
@@ -1132,12 +1132,12 @@ pub fn ArrayHashMapUnmanaged(
 
         /// Create a copy of the hash map which can be modified separately.
         /// The copy uses the same context and allocator as this instance.
-        pub fn clone(self: Self, allocator: *Allocator) !Self {
+        pub fn clone(self: Self, allocator: Allocator) !Self {
             if (@sizeOf(ByIndexContext) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
             return self.cloneContext(allocator, undefined);
         }
-        pub fn cloneContext(self: Self, allocator: *Allocator, ctx: Context) !Self {
+        pub fn cloneContext(self: Self, allocator: Allocator, ctx: Context) !Self {
             var other: Self = .{};
             other.entries = try self.entries.clone(allocator);
             errdefer other.entries.deinit(allocator);
@@ -1152,12 +1152,12 @@ pub fn ArrayHashMapUnmanaged(
 
         /// Rebuilds the key indexes. If the underlying entries have been modified directly, users
         /// can call `reIndex` to update the indexes to account for these new entries.
-        pub fn reIndex(self: *Self, allocator: *Allocator) !void {
+        pub fn reIndex(self: *Self, allocator: Allocator) !void {
             if (@sizeOf(ByIndexContext) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call reIndexContext instead.");
             return self.reIndexContext(allocator, undefined);
         }
-        pub fn reIndexContext(self: *Self, allocator: *Allocator, ctx: Context) !void {
+        pub fn reIndexContext(self: *Self, allocator: Allocator, ctx: Context) !void {
             if (self.entries.capacity <= linear_scan_max) return;
             // We're going to rebuild the index header and replace the existing one (if any). The
             // indexes should be sized such that they will be at most 60% full.
@@ -1189,12 +1189,12 @@ pub fn ArrayHashMapUnmanaged(
 
         /// Shrinks the underlying `Entry` array to `new_len` elements and discards any associated
         /// index entries. Reduces allocated capacity.
-        pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+        pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
             if (@sizeOf(ByIndexContext) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call shrinkAndFreeContext instead.");
             return self.shrinkAndFreeContext(allocator, new_len, undefined);
         }
-        pub fn shrinkAndFreeContext(self: *Self, allocator: *Allocator, new_len: usize, ctx: Context) void {
+        pub fn shrinkAndFreeContext(self: *Self, allocator: Allocator, new_len: usize, ctx: Context) void {
             // Remove index entries from the new length onwards.
             // Explicitly choose to ONLY remove index entries and not the underlying array list
             // entries as we're going to remove them in the subsequent shrink call.
@@ -1844,7 +1844,7 @@ const IndexHeader = struct {
 
     /// Allocates an index header, and fills the entryIndexes array with empty.
     /// The distance array contents are undefined.
-    fn alloc(allocator: *Allocator, new_bit_index: u8) !*IndexHeader {
+    fn alloc(allocator: Allocator, new_bit_index: u8) !*IndexHeader {
         const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
         const index_size = hash_map.capacityIndexSize(new_bit_index);
         const nbytes = @sizeOf(IndexHeader) + index_size * len;
@@ -1858,7 +1858,7 @@ const IndexHeader = struct {
     }
 
     /// Releases the memory for a header and its associated arrays.
-    fn free(header: *IndexHeader, allocator: *Allocator) void {
+    fn free(header: *IndexHeader, allocator: Allocator) void {
         const index_size = hash_map.capacityIndexSize(header.bit_index);
         const ptr = @ptrCast([*]align(@alignOf(IndexHeader)) u8, header);
         const slice = ptr[0 .. @sizeOf(IndexHeader) + header.length() * index_size];
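
As the hunks above make clear, unmanaged containers keep no allocator field at
all; every allocating method now takes a value-type Allocator argument per call.
A brief sketch of that calling convention (illustrative, not part of the patch):

    const std = @import("std");

    test "unmanaged maps take the allocator per call" {
        const a = std.testing.allocator;
        var map: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
        defer map.deinit(a);

        try map.put(a, 1, "one");
        try std.testing.expectEqualStrings("one", map.get(1).?);
    }
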
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 24049dad5c33..d88dae95ff73 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -42,12 +42,12 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
         /// How many T values this list can hold without allocating
         /// additional memory.
         capacity: usize,
-        allocator: *Allocator,
+        allocator: Allocator,
 
         pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
 
         /// Deinitialize with `deinit` or use `toOwnedSlice`.
-        pub fn init(allocator: *Allocator) Self {
+        pub fn init(allocator: Allocator) Self {
             return Self{
                 .items = &[_]T{},
                 .capacity = 0,
@@ -58,7 +58,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
         /// Initialize with capacity to hold at least `num` elements.
         /// The resulting capacity is likely to be equal to `num`.
         /// Deinitialize with `deinit` or use `toOwnedSlice`.
-        pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+        pub fn initCapacity(allocator: Allocator, num: usize) !Self {
             var self = Self.init(allocator);
             try self.ensureTotalCapacityPrecise(num);
             return self;
@@ -74,7 +74,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
         /// ArrayList takes ownership of the passed in slice. The slice must have been
         /// allocated with `allocator`.
         /// Deinitialize with `deinit` or use `toOwnedSlice`.
-        pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self {
+        pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self {
             return Self{
                 .items = slice,
                 .capacity = slice.len,
@@ -457,33 +457,33 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// Initialize with capacity to hold at least `num` elements.
         /// The resulting capacity is likely to be equal to `num`.
         /// Deinitialize with `deinit` or use `toOwnedSlice`.
-        pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
+        pub fn initCapacity(allocator: Allocator, num: usize) !Self {
             var self = Self{};
             try self.ensureTotalCapacityPrecise(allocator, num);
             return self;
         }
 
         /// Release all allocated memory.
-        pub fn deinit(self: *Self, allocator: *Allocator) void {
+        pub fn deinit(self: *Self, allocator: Allocator) void {
             allocator.free(self.allocatedSlice());
             self.* = undefined;
         }
 
         /// Convert this list into an analogous memory-managed one.
         /// The returned list has ownership of the underlying memory.
-        pub fn toManaged(self: *Self, allocator: *Allocator) ArrayListAligned(T, alignment) {
+        pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) {
             return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
         }
 
         /// The caller owns the returned memory. ArrayList becomes empty.
-        pub fn toOwnedSlice(self: *Self, allocator: *Allocator) Slice {
+        pub fn toOwnedSlice(self: *Self, allocator: Allocator) Slice {
             const result = allocator.shrink(self.allocatedSlice(), self.items.len);
             self.* = Self{};
             return result;
         }
 
         /// The caller owns the returned memory. ArrayList becomes empty.
-        pub fn toOwnedSliceSentinel(self: *Self, allocator: *Allocator, comptime sentinel: T) ![:sentinel]T {
+        pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) ![:sentinel]T {
             try self.append(allocator, sentinel);
             const result = self.toOwnedSlice(allocator);
             return result[0 .. result.len - 1 :sentinel];
@@ -492,7 +492,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// Insert `item` at index `n`. Moves `list[n .. list.len]`
         /// to higher indices to make room.
         /// This operation is O(N).
-        pub fn insert(self: *Self, allocator: *Allocator, n: usize, item: T) !void {
+        pub fn insert(self: *Self, allocator: Allocator, n: usize, item: T) !void {
             try self.ensureUnusedCapacity(allocator, 1);
             self.items.len += 1;
 
@@ -503,7 +503,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// Insert slice `items` at index `i`. Moves `list[i .. list.len]` to
         /// higher indices to make room.
         /// This operation is O(N).
-        pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: []const T) !void {
+        pub fn insertSlice(self: *Self, allocator: Allocator, i: usize, items: []const T) !void {
             try self.ensureUnusedCapacity(allocator, items.len);
             self.items.len += items.len;
 
@@ -515,14 +515,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// Grows list if `len < new_items.len`.
         /// Shrinks list if `len > new_items.len`
         /// Invalidates pointers if this ArrayList is resized.
-        pub fn replaceRange(self: *Self, allocator: *Allocator, start: usize, len: usize, new_items: []const T) !void {
+        pub fn replaceRange(self: *Self, allocator: Allocator, start: usize, len: usize, new_items: []const T) !void {
             var managed = self.toManaged(allocator);
             try managed.replaceRange(start, len, new_items);
             self.* = managed.moveToUnmanaged();
         }
 
         /// Extend the list by 1 element. Allocates more memory as necessary.
-        pub fn append(self: *Self, allocator: *Allocator, item: T) !void {
+        pub fn append(self: *Self, allocator: Allocator, item: T) !void {
             const new_item_ptr = try self.addOne(allocator);
             new_item_ptr.* = item;
         }
@@ -563,7 +563,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 
         /// Append the slice of items to the list. Allocates more
         /// memory as necessary.
-        pub fn appendSlice(self: *Self, allocator: *Allocator, items: []const T) !void {
+        pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) !void {
             try self.ensureUnusedCapacity(allocator, items.len);
             self.appendSliceAssumeCapacity(items);
         }
@@ -580,7 +580,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 
         pub const WriterContext = struct {
             self: *Self,
-            allocator: *Allocator,
+            allocator: Allocator,
         };
 
         pub const Writer = if (T != u8)
@@ -590,7 +590,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
             std.io.Writer(WriterContext, error{OutOfMemory}, appendWrite);
 
         /// Initializes a Writer which will append to the list.
-        pub fn writer(self: *Self, allocator: *Allocator) Writer {
+        pub fn writer(self: *Self, allocator: Allocator) Writer {
             return .{ .context = .{ .self = self, .allocator = allocator } };
         }
 
@@ -603,7 +603,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 
         /// Append a value to the list `n` times.
         /// Allocates more memory as necessary.
-        pub fn appendNTimes(self: *Self, allocator: *Allocator, value: T, n: usize) !void {
+        pub fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) !void {
             const old_len = self.items.len;
             try self.resize(allocator, self.items.len + n);
             mem.set(T, self.items[old_len..self.items.len], value);
@@ -621,13 +621,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 
         /// Adjust the list's length to `new_len`.
         /// Does not initialize added items, if any.
-        pub fn resize(self: *Self, allocator: *Allocator, new_len: usize) !void {
+        pub fn resize(self: *Self, allocator: Allocator, new_len: usize) !void {
             try self.ensureTotalCapacity(allocator, new_len);
             self.items.len = new_len;
         }
 
         /// Reduce allocated capacity to `new_len`.
-        pub fn shrinkAndFree(self: *Self, allocator: *Allocator, new_len: usize) void {
+        pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
             assert(new_len <= self.items.len);
 
             self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {
@@ -653,7 +653,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         }
 
         /// Invalidates all element pointers.
-        pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+        pub fn clearAndFree(self: *Self, allocator: Allocator) void {
             allocator.free(self.allocatedSlice());
             self.items.len = 0;
             self.capacity = 0;
@@ -663,7 +663,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 
         /// Modify the array so that it can hold at least `new_capacity` items.
         /// Invalidates pointers if additional memory is needed.
-        pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+        pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) !void {
             var better_capacity = self.capacity;
             if (better_capacity >= new_capacity) return;
 
@@ -679,7 +679,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// Like `ensureTotalCapacity`, but the resulting capacity is much more likely
         /// (but not guaranteed) to be equal to `new_capacity`.
         /// Invalidates pointers if additional memory is needed.
-        pub fn ensureTotalCapacityPrecise(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
+        pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) !void {
             if (self.capacity >= new_capacity) return;
 
             const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);
@@ -691,7 +691,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// Invalidates pointers if additional memory is needed.
         pub fn ensureUnusedCapacity(
             self: *Self,
-            allocator: *Allocator,
+            allocator: Allocator,
             additional_count: usize,
         ) !void {
             return self.ensureTotalCapacity(allocator, self.items.len + additional_count);
@@ -706,7 +706,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
 
         /// Increase length by 1, returning pointer to the new item.
         /// The returned pointer becomes invalid when the list is resized.
-        pub fn addOne(self: *Self, allocator: *Allocator) !*T {
+        pub fn addOne(self: *Self, allocator: Allocator) !*T {
             const newlen = self.items.len + 1;
             try self.ensureTotalCapacity(allocator, newlen);
             return self.addOneAssumeCapacity();
@@ -726,7 +726,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         /// Resize the array, adding `n` new elements, which have `undefined` values.
         /// The return value is a pointer to an array of the newly allocated elements.
         /// The returned pointer becomes invalid when the list is resized.
-        pub fn addManyAsArray(self: *Self, allocator: *Allocator, comptime n: usize) !*[n]T {
+        pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) !*[n]T {
             const prev_len = self.items.len;
             try self.resize(allocator, self.items.len + n);
             return self.items[prev_len..][0..n];
@@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
 test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
     var arena = std.heap.ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
-    const a = &arena.allocator;
+    const a = arena.getAllocator();
 
     const init = [_]i32{ 1, 2, 3, 4, 5 };
     const new = [_]i32{ 0, 0, 0 };
@@ -1263,7 +1263,7 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe
     // use an arena allocator to make sure realloc returns error.OutOfMemory
     var arena = std.heap.ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
-    const a = &arena.allocator;
+    const a = arena.getAllocator();
 
     {
         var list = ArrayList(i32).init(a);
@@ -1361,7 +1361,7 @@ test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" {
 
 test "std.ArrayList(u0)" {
     // An ArrayList on zero-sized types should not need to allocate
-    const a = &testing.FailingAllocator.init(testing.allocator, 0).allocator;
+    const a = testing.FailingAllocator.init(testing.allocator, 0).getAllocator();
 
     var list = ArrayList(u0).init(a);
     defer list.deinit();
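
The test updates above replace `&arena.allocator` with `arena.getAllocator()`.
The same pattern, reduced to a standalone sketch:

    const std = @import("std");

    test "arena-backed ArrayList under the new interface" {
        var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena.deinit();
        const a = arena.getAllocator();

        var list = std.ArrayList(i32).init(a);
        defer list.deinit();
        try list.appendSlice(&[_]i32{ 1, 2, 3 });
        try std.testing.expectEqual(@as(usize, 3), list.items.len);
    }
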
diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig
index c999162b3643..8174361800f5 100644
--- a/lib/std/ascii.zig
+++ b/lib/std/ascii.zig
@@ -301,7 +301,7 @@ test "lowerString" {
 
 /// Allocates a lower case copy of `ascii_string`.
 /// Caller owns returned string and must free with `allocator`.
-pub fn allocLowerString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocLowerString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
     const result = try allocator.alloc(u8, ascii_string.len);
     return lowerString(result, ascii_string);
 }
@@ -330,7 +330,7 @@ test "upperString" {
 
 /// Allocates an upper case copy of `ascii_string`.
 /// Caller owns returned string and must free with `allocator`.
-pub fn allocUpperString(allocator: *std.mem.Allocator, ascii_string: []const u8) ![]u8 {
+pub fn allocUpperString(allocator: std.mem.Allocator, ascii_string: []const u8) ![]u8 {
     const result = try allocator.alloc(u8, ascii_string.len);
     return upperString(result, ascii_string);
 }
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 62de8d9f10dc..3b4a14110c13 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -156,7 +156,7 @@ pub fn Queue(comptime T: type) type {
 }
 
 const Context = struct {
-    allocator: *std.mem.Allocator,
+    allocator: std.mem.Allocator,
     queue: *Queue(i32),
     put_sum: isize,
     get_sum: isize,
@@ -176,8 +176,8 @@ test "std.atomic.Queue" {
     var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
     defer std.heap.page_allocator.free(plenty_of_memory);
 
-    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
-    var a = &fixed_buffer_allocator.allocator;
+    var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+    var a = fixed_buffer_allocator.getThreadSafeAllocator();
 
     var queue = Queue(i32).init();
     var context = Context{
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 35f691425224..c1b368b57191 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -69,7 +69,7 @@ pub fn Stack(comptime T: type) type {
 }
 
 const Context = struct {
-    allocator: *std.mem.Allocator,
+    allocator: std.mem.Allocator,
     stack: *Stack(i32),
     put_sum: isize,
     get_sum: isize,
@@ -88,8 +88,8 @@ test "std.atomic.stack" {
     var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
     defer std.heap.page_allocator.free(plenty_of_memory);
 
-    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
-    var a = &fixed_buffer_allocator.allocator;
+    var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory);
+    var a = fixed_buffer_allocator.getThreadSafeAllocator();
 
     var stack = Stack(i32).init();
     var context = Context{
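
Both atomic tests above drop std.heap.ThreadSafeFixedBufferAllocator in favor of
a thread-safe accessor on the plain FixedBufferAllocator. A minimal sketch of
that accessor as this patch names it (getThreadSafeAllocator):

    const std = @import("std");

    test "thread-safe allocation from a FixedBufferAllocator" {
        var buf: [4096]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buf);
        const a = fba.getThreadSafeAllocator();

        const slice = try a.alloc(u8, 64);
        defer a.free(slice);
        try std.testing.expect(slice.len == 64);
    }
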
diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig
index af960784f76e..2848305819f2 100644
--- a/lib/std/bit_set.zig
+++ b/lib/std/bit_set.zig
@@ -476,7 +476,7 @@ pub const DynamicBitSetUnmanaged = struct {
 
     /// Creates a bit set with no elements present.
     /// If bit_length is not zero, deinit must eventually be called.
-    pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+    pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
         var self = Self{};
         try self.resize(bit_length, false, allocator);
         return self;
@@ -484,7 +484,7 @@ pub const DynamicBitSetUnmanaged = struct {
 
     /// Creates a bit set with all elements present.
     /// If bit_length is not zero, deinit must eventually be called.
-    pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+    pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
         var self = Self{};
         try self.resize(bit_length, true, allocator);
         return self;
@@ -493,7 +493,7 @@ pub const DynamicBitSetUnmanaged = struct {
     /// Resizes to a new bit_length.  If the new length is larger
     /// than the old length, fills any added bits with `fill`.
     /// If new_len is not zero, deinit must eventually be called.
-    pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: *Allocator) !void {
+    pub fn resize(self: *@This(), new_len: usize, fill: bool, allocator: Allocator) !void {
         const old_len = self.bit_length;
 
         const old_masks = numMasks(old_len);
@@ -556,12 +556,12 @@ pub const DynamicBitSetUnmanaged = struct {
     /// deinitializes the array and releases its memory.
     /// The passed allocator must be the same one used for
     /// init* or resize in the past.
-    pub fn deinit(self: *Self, allocator: *Allocator) void {
+    pub fn deinit(self: *Self, allocator: Allocator) void {
         self.resize(0, false, allocator) catch unreachable;
     }
 
     /// Creates a duplicate of this bit set, using the new allocator.
-    pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+    pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
         const num_masks = numMasks(self.bit_length);
         var copy = Self{};
         try copy.resize(self.bit_length, false, new_allocator);
@@ -742,13 +742,13 @@ pub const DynamicBitSet = struct {
     pub const ShiftInt = std.math.Log2Int(MaskInt);
 
     /// The allocator used by this bit set
-    allocator: *Allocator,
+    allocator: Allocator,
 
     /// The number of valid items in this bit set
     unmanaged: DynamicBitSetUnmanaged = .{},
 
     /// Creates a bit set with no elements present.
-    pub fn initEmpty(bit_length: usize, allocator: *Allocator) !Self {
+    pub fn initEmpty(bit_length: usize, allocator: Allocator) !Self {
         return Self{
             .unmanaged = try DynamicBitSetUnmanaged.initEmpty(bit_length, allocator),
             .allocator = allocator,
@@ -756,7 +756,7 @@ pub const DynamicBitSet = struct {
     }
 
     /// Creates a bit set with all elements present.
-    pub fn initFull(bit_length: usize, allocator: *Allocator) !Self {
+    pub fn initFull(bit_length: usize, allocator: Allocator) !Self {
         return Self{
             .unmanaged = try DynamicBitSetUnmanaged.initFull(bit_length, allocator),
             .allocator = allocator,
@@ -777,7 +777,7 @@ pub const DynamicBitSet = struct {
     }
 
     /// Creates a duplicate of this bit set, using the new allocator.
-    pub fn clone(self: *const Self, new_allocator: *Allocator) !Self {
+    pub fn clone(self: *const Self, new_allocator: Allocator) !Self {
         return Self{
             .unmanaged = try self.unmanaged.clone(new_allocator),
             .allocator = new_allocator,
diff --git a/lib/std/buf_map.zig b/lib/std/buf_map.zig
index 1e4462e6aee4..5b26ae9684e9 100644
--- a/lib/std/buf_map.zig
+++ b/lib/std/buf_map.zig
@@ -14,7 +14,7 @@ pub const BufMap = struct {
     /// Create a BufMap backed by a specific allocator.
     /// That allocator will be used for both backing allocations
     /// and string deduplication.
-    pub fn init(allocator: *Allocator) BufMap {
+    pub fn init(allocator: Allocator) BufMap {
         var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
         return self;
     }
diff --git a/lib/std/buf_set.zig b/lib/std/buf_set.zig
index ce2d51b0569c..e68b24fbcc13 100644
--- a/lib/std/buf_set.zig
+++ b/lib/std/buf_set.zig
@@ -16,7 +16,7 @@ pub const BufSet = struct {
     /// Create a BufSet using an allocator.  The allocator will
     /// be used internally for both backing allocations and
     /// string duplication.
-    pub fn init(a: *Allocator) BufSet {
+    pub fn init(a: Allocator) BufSet {
         var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
         return self;
     }
@@ -67,7 +67,7 @@ pub const BufSet = struct {
     }
 
     /// Get the allocator used by this set
-    pub fn allocator(self: *const BufSet) *Allocator {
+    pub fn allocator(self: *const BufSet) Allocator {
         return self.hash_map.allocator;
     }
 
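
The BufSet hunk above changes the allocator() accessor to return the stored
std.mem.Allocator by value rather than a *Allocator. A short illustrative
sketch; the insert/contains calls assume the BufSet API of this era:

    const std = @import("std");

    test "BufSet returns its allocator by value" {
        var set = std.BufSet.init(std.testing.allocator);
        defer set.deinit();
        try set.insert("zig");

        const a = set.allocator();
        const copy = try a.dupe(u8, "allocgate");
        defer a.free(copy);
        try std.testing.expect(set.contains("zig"));
    }
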
diff --git a/lib/std/build.zig b/lib/std/build.zig
index 3f6c7aa94340..dba27f86b9c5 100644
--- a/lib/std/build.zig
+++ b/lib/std/build.zig
@@ -28,7 +28,7 @@ pub const OptionsStep = @import("build/OptionsStep.zig");
 pub const Builder = struct {
     install_tls: TopLevelStep,
     uninstall_tls: TopLevelStep,
-    allocator: *Allocator,
+    allocator: Allocator,
     user_input_options: UserInputOptionsMap,
     available_options_map: AvailableOptionsMap,
     available_options_list: ArrayList(AvailableOption),
@@ -134,7 +134,7 @@ pub const Builder = struct {
     };
 
     pub fn create(
-        allocator: *Allocator,
+        allocator: Allocator,
         zig_exe: []const u8,
         build_root: []const u8,
         cache_root: []const u8,
@@ -1285,7 +1285,7 @@ test "builder.findProgram compiles" {
     defer arena.deinit();
 
     const builder = try Builder.create(
-        &arena.allocator,
+        arena.getAllocator(),
         "zig",
         "zig-cache",
         "zig-cache",
@@ -3077,7 +3077,7 @@ pub const Step = struct {
         custom,
     };
 
-    pub fn init(id: Id, name: []const u8, allocator: *Allocator, makeFn: fn (*Step) anyerror!void) Step {
+    pub fn init(id: Id, name: []const u8, allocator: Allocator, makeFn: fn (*Step) anyerror!void) Step {
         return Step{
             .id = id,
             .name = allocator.dupe(u8, name) catch unreachable,
@@ -3087,7 +3087,7 @@ pub const Step = struct {
             .done_flag = false,
         };
     }
-    pub fn initNoOp(id: Id, name: []const u8, allocator: *Allocator) Step {
+    pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
         return init(id, name, allocator, makeNoOp);
     }
 
@@ -3114,7 +3114,7 @@ pub const Step = struct {
     }
 };
 
-fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+fn doAtomicSymLinks(allocator: Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
     const out_dir = fs.path.dirname(output_path) orelse ".";
     const out_basename = fs.path.basename(output_path);
     // sym link for libfoo.so.1 to libfoo.so.1.2.3
@@ -3138,7 +3138,7 @@ fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_maj
 }
 
 /// Returned slice must be freed by the caller.
-fn findVcpkgRoot(allocator: *Allocator) !?[]const u8 {
+fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
     const appdata_path = try fs.getAppDataDir(allocator, "vcpkg");
     defer allocator.free(appdata_path);
 
@@ -3207,7 +3207,7 @@ test "Builder.dupePkg()" {
     var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
     defer arena.deinit();
     var builder = try Builder.create(
-        &arena.allocator,
+        arena.getAllocator(),
         "test",
         "test",
         "test",
@@ -3252,7 +3252,7 @@ test "LibExeObjStep.addPackage" {
     defer arena.deinit();
 
     var builder = try Builder.create(
-        &arena.allocator,
+        arena.getAllocator(),
         "test",
         "test",
         "test",
diff --git a/lib/std/build/InstallRawStep.zig b/lib/std/build/InstallRawStep.zig
index d87ff2fffdde..0f921d6622c5 100644
--- a/lib/std/build/InstallRawStep.zig
+++ b/lib/std/build/InstallRawStep.zig
@@ -40,7 +40,7 @@ const BinaryElfOutput = struct {
         self.segments.deinit();
     }
 
-    pub fn parse(allocator: *Allocator, elf_file: File) !Self {
+    pub fn parse(allocator: Allocator, elf_file: File) !Self {
         var self: Self = .{
             .segments = ArrayList(*BinaryElfSegment).init(allocator),
             .sections = ArrayList(*BinaryElfSection).init(allocator),
@@ -298,7 +298,7 @@ fn containsValidAddressRange(segments: []*BinaryElfSegment) bool {
     return true;
 }
 
-fn emitRaw(allocator: *Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
+fn emitRaw(allocator: Allocator, elf_path: []const u8, raw_path: []const u8, format: RawFormat) !void {
     var elf_file = try fs.cwd().openFile(elf_path, .{});
     defer elf_file.close();
 
diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig
index dfe512adecb9..d3ac0d419628 100644
--- a/lib/std/build/OptionsStep.zig
+++ b/lib/std/build/OptionsStep.zig
@@ -274,7 +274,7 @@ test "OptionsStep" {
     var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
     defer arena.deinit();
     var builder = try Builder.create(
-        &arena.allocator,
+        arena.getAllocator(),
         "test",
         "test",
         "test",
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index e7cf56f39d6b..e0acc237d946 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -75,7 +75,7 @@ pub const StackTrace = struct {
         };
         const tty_config = std.debug.detectTTYConfig();
         try writer.writeAll("\n");
-        std.debug.writeStackTrace(self, writer, &arena.allocator, debug_info, tty_config) catch |err| {
+        std.debug.writeStackTrace(self, writer, arena.getAllocator(), debug_info, tty_config) catch |err| {
             try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
         };
         try writer.writeAll("\n");
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 2e1dfad00a95..cc9f1b28018d 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -23,7 +23,7 @@ pub const ChildProcess = struct {
     handle: if (builtin.os.tag == .windows) windows.HANDLE else void,
     thread_handle: if (builtin.os.tag == .windows) windows.HANDLE else void,
 
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
 
     stdin: ?File,
     stdout: ?File,
@@ -90,7 +90,7 @@ pub const ChildProcess = struct {
 
     /// First argument in argv is the executable.
     /// On success, the caller must call deinit.
-    pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
+    pub fn init(argv: []const []const u8, allocator: mem.Allocator) !*ChildProcess {
         const child = try allocator.create(ChildProcess);
         child.* = ChildProcess{
             .allocator = allocator,
@@ -329,7 +329,7 @@ pub const ChildProcess = struct {
     /// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
     /// If it succeeds, the caller owns result.stdout and result.stderr memory.
     pub fn exec(args: struct {
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         argv: []const []const u8,
         cwd: ?[]const u8 = null,
         cwd_dir: ?fs.Dir = null,
@@ -541,7 +541,7 @@ pub const ChildProcess = struct {
 
         var arena_allocator = std.heap.ArenaAllocator.init(self.allocator);
         defer arena_allocator.deinit();
-        const arena = &arena_allocator.allocator;
+        const arena = arena_allocator.getAllocator();
 
         // The POSIX standard does not allow malloc() between fork() and execve(),
         // and `self.allocator` may be a libc allocator.
@@ -931,7 +931,7 @@ fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u1
 }
 
 /// Caller must dealloc.
-fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![:0]u8 {
+fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8) ![:0]u8 {
     var buf = std.ArrayList(u8).init(allocator);
     defer buf.deinit();
 
@@ -1081,7 +1081,7 @@ fn readIntFd(fd: i32) !ErrInt {
 }
 
 /// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 {
+pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const BufMap) ![]u16 {
     // count bytes needed
     const max_chars_needed = x: {
         var max_chars_needed: usize = 4; // 4 for the final 4 null bytes
@@ -1117,7 +1117,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
     return allocator.shrink(result, i);
 }
 
-pub fn createNullDelimitedEnvMap(arena: *mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
+pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const std.BufMap) ![:null]?[*:0]u8 {
     const envp_count = env_map.count();
     const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null);
     {
@@ -1149,7 +1149,7 @@ test "createNullDelimitedEnvMap" {
 
     var arena = std.heap.ArenaAllocator.init(allocator);
     defer arena.deinit();
-    const environ = try createNullDelimitedEnvMap(&arena.allocator, &envmap);
+    const environ = try createNullDelimitedEnvMap(arena.getAllocator(), &envmap);
 
     try testing.expectEqual(@as(usize, 5), environ.len);
 
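
ChildProcess.exec now receives the allocator by value through its args struct.
A sketch of a caller under the new interface; the getAllocator() call on
GeneralPurposeAllocator is an assumption based on this patch's diffstat, which
lists lib/std/heap/general_purpose_allocator.zig:

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        const allocator = gpa.getAllocator(); // assumed accessor name

        const result = try std.ChildProcess.exec(.{
            .allocator = allocator,
            .argv = &[_][]const u8{ "echo", "hello" },
        });
        defer allocator.free(result.stdout);
        defer allocator.free(result.stderr);
    }
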
diff --git a/lib/std/coff.zig b/lib/std/coff.zig
index 961cd8ade6de..2bf0b1c44e35 100644
--- a/lib/std/coff.zig
+++ b/lib/std/coff.zig
@@ -98,7 +98,7 @@ pub const CoffError = error{
 // Official documentation of the format: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
 pub const Coff = struct {
     in_file: File,
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
 
     coff_header: CoffHeader,
     pe_header: OptionalHeader,
@@ -107,7 +107,7 @@ pub const Coff = struct {
     guid: [16]u8,
     age: u32,
 
-    pub fn init(allocator: *mem.Allocator, in_file: File) Coff {
+    pub fn init(allocator: mem.Allocator, in_file: File) Coff {
         return Coff{
             .in_file = in_file,
             .allocator = allocator,
@@ -324,7 +324,7 @@ pub const Coff = struct {
     }
 
     // Return an owned slice full of the section data
-    pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: *mem.Allocator) ![]u8 {
+    pub fn getSectionData(self: *Coff, comptime name: []const u8, allocator: mem.Allocator) ![]u8 {
         const sec = for (self.sections.items) |*sec| {
             if (mem.eql(u8, sec.header.name[0..name.len], name)) {
                 break sec;
diff --git a/lib/std/compress/gzip.zig b/lib/std/compress/gzip.zig
index 497b07d905e2..491b888812fc 100644
--- a/lib/std/compress/gzip.zig
+++ b/lib/std/compress/gzip.zig
@@ -24,7 +24,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
             error{ CorruptedData, WrongChecksum };
         pub const Reader = io.Reader(*Self, Error, read);
 
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         inflater: deflate.InflateStream(ReaderType),
         in_reader: ReaderType,
         hasher: std.hash.Crc32,
@@ -37,7 +37,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
             modification_time: u32,
         },
 
-        fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+        fn init(allocator: mem.Allocator, source: ReaderType) !Self {
             // gzip header format is specified in RFC1952
             const header = try source.readBytesNoEof(10);
 
@@ -152,7 +152,7 @@ pub fn GzipStream(comptime ReaderType: type) type {
     };
 }
 
-pub fn gzipStream(allocator: *mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
+pub fn gzipStream(allocator: mem.Allocator, reader: anytype) !GzipStream(@TypeOf(reader)) {
     return GzipStream(@TypeOf(reader)).init(allocator, reader);
 }
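
A usage sketch for the refactored `gzipStream` above, assuming the reader and `deinit` API of `std.compress.gzip` at the time of this patch; the helper `readGzipAlloc` is hypothetical.

    const std = @import("std");

    fn readGzipAlloc(allocator: std.mem.Allocator, compressed: []const u8) ![]u8 {
        var fbs = std.io.fixedBufferStream(compressed);
        // The allocator is passed by value, matching the new init signature.
        var gzip = try std.compress.gzip.gzipStream(allocator, fbs.reader());
        defer gzip.deinit();
        return gzip.reader().readAllAlloc(allocator, 1 << 20);
    }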
 
diff --git a/lib/std/compress/zlib.zig b/lib/std/compress/zlib.zig
index f0f4ca2ff4d9..09d9c18e7248 100644
--- a/lib/std/compress/zlib.zig
+++ b/lib/std/compress/zlib.zig
@@ -17,13 +17,13 @@ pub fn ZlibStream(comptime ReaderType: type) type {
             error{ WrongChecksum, Unsupported };
         pub const Reader = io.Reader(*Self, Error, read);
 
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         inflater: deflate.InflateStream(ReaderType),
         in_reader: ReaderType,
         hasher: std.hash.Adler32,
         window_slice: []u8,
 
-        fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
+        fn init(allocator: mem.Allocator, source: ReaderType) !Self {
             // Zlib header format is specified in RFC1950
             const header = try source.readBytesNoEof(2);
 
@@ -88,7 +88,7 @@ pub fn ZlibStream(comptime ReaderType: type) type {
     };
 }
 
-pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
+pub fn zlibStream(allocator: mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
     return ZlibStream(@TypeOf(reader)).init(allocator, reader);
 }
 
diff --git a/lib/std/crypto/argon2.zig b/lib/std/crypto/argon2.zig
index 66cd8b38f117..493f36ca94c0 100644
--- a/lib/std/crypto/argon2.zig
+++ b/lib/std/crypto/argon2.zig
@@ -201,7 +201,7 @@ fn initBlocks(
 }
 
 fn processBlocks(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     blocks: *Blocks,
     time: u32,
     memory: u32,
@@ -240,7 +240,7 @@ fn processBlocksSt(
 }
 
 fn processBlocksMt(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     blocks: *Blocks,
     time: u32,
     memory: u32,
@@ -480,7 +480,7 @@ fn indexAlpha(
 ///
 /// Salt has to be at least 8 bytes long.
 pub fn kdf(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     derived_key: []u8,
     password: []const u8,
     salt: []const u8,
@@ -524,7 +524,7 @@ const PhcFormatHasher = struct {
     };
 
     pub fn create(
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         password: []const u8,
         params: Params,
         mode: Mode,
@@ -550,7 +550,7 @@ const PhcFormatHasher = struct {
     }
 
     pub fn verify(
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         str: []const u8,
         password: []const u8,
     ) HasherError!void {
@@ -579,7 +579,7 @@ const PhcFormatHasher = struct {
 ///
 /// Only phc encoding is supported.
 pub const HashOptions = struct {
-    allocator: ?*mem.Allocator,
+    allocator: ?mem.Allocator,
     params: Params,
     mode: Mode = .argon2id,
     encoding: pwhash.Encoding = .phc,
@@ -609,7 +609,7 @@ pub fn strHash(
 ///
 /// Allocator is required for argon2.
 pub const VerifyOptions = struct {
-    allocator: ?*mem.Allocator,
+    allocator: ?mem.Allocator,
 };
 
 /// Verify that a previously computed hash is valid for a given password.
diff --git a/lib/std/crypto/bcrypt.zig b/lib/std/crypto/bcrypt.zig
index d8c4d67453c7..bd3c9ca7d4e7 100644
--- a/lib/std/crypto/bcrypt.zig
+++ b/lib/std/crypto/bcrypt.zig
@@ -368,7 +368,7 @@ const CryptFormatHasher = struct {
 
 /// Options for hashing a password.
 pub const HashOptions = struct {
-    allocator: ?*mem.Allocator = null,
+    allocator: ?mem.Allocator = null,
     params: Params,
     encoding: pwhash.Encoding,
 };
@@ -394,7 +394,7 @@ pub fn strHash(
 
 /// Options for hash verification.
 pub const VerifyOptions = struct {
-    allocator: ?*mem.Allocator = null,
+    allocator: ?mem.Allocator = null,
 };
 
 /// Verify that a previously computed hash is valid for a given password.
diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig
index 4836de032e0d..52e56ddf18be 100644
--- a/lib/std/crypto/benchmark.zig
+++ b/lib/std/crypto/benchmark.zig
@@ -363,7 +363,7 @@ pub fn main() !void {
 
     var buffer: [1024]u8 = undefined;
     var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
-    const args = try std.process.argsAlloc(&fixed.allocator);
+    const args = try std.process.argsAlloc(fixed.getAllocator());
 
     var filter: ?[]u8 = "";
 
diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig
index b17952dcd6a9..e464cca28eaa 100644
--- a/lib/std/crypto/scrypt.zig
+++ b/lib/std/crypto/scrypt.zig
@@ -161,7 +161,7 @@ pub const Params = struct {
 ///
 /// scrypt is defined in RFC 7914.
 ///
-/// allocator: *mem.Allocator.
+/// allocator: mem.Allocator.
 ///
 /// derived_key: Slice of appropriate size for generated key. Generally 16 or 32 bytes in length.
 ///              May be uninitialized. All bytes will be overwritten.
@@ -173,7 +173,7 @@ pub const Params = struct {
 ///
 /// params: Params.
 pub fn kdf(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     derived_key: []u8,
     password: []const u8,
     salt: []const u8,
@@ -406,7 +406,7 @@ const PhcFormatHasher = struct {
 
     /// Return a non-deterministic hash of the password encoded as a PHC-format string
     pub fn create(
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         password: []const u8,
         params: Params,
         buf: []u8,
@@ -429,7 +429,7 @@ const PhcFormatHasher = struct {
 
     /// Verify a password against a PHC-format encoded string
     pub fn verify(
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         str: []const u8,
         password: []const u8,
     ) HasherError!void {
@@ -455,7 +455,7 @@ const CryptFormatHasher = struct {
 
     /// Return a non-deterministic hash of the password encoded into the modular crypt format
     pub fn create(
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         password: []const u8,
         params: Params,
         buf: []u8,
@@ -478,7 +478,7 @@ const CryptFormatHasher = struct {
 
     /// Verify a password against a string in modular crypt format
     pub fn verify(
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         str: []const u8,
         password: []const u8,
     ) HasherError!void {
@@ -497,7 +497,7 @@ const CryptFormatHasher = struct {
 ///
 /// Allocator is required for scrypt.
 pub const HashOptions = struct {
-    allocator: ?*mem.Allocator,
+    allocator: ?mem.Allocator,
     params: Params,
     encoding: pwhash.Encoding,
 };
@@ -520,7 +520,7 @@ pub fn strHash(
 ///
 /// Allocator is required for scrypt.
 pub const VerifyOptions = struct {
-    allocator: ?*mem.Allocator,
+    allocator: ?mem.Allocator,
 };
 
 /// Verify that a previously computed hash is valid for a given password.
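
The optional allocators in `HashOptions` and `VerifyOptions` likewise become `?mem.Allocator`. A sketch of a call site under that assumption, using the `strHash(password, options, buf)` shape referenced in the hunk headers above:

    const std = @import("std");
    const scrypt = std.crypto.pwhash.scrypt;

    fn hashPassword(allocator: std.mem.Allocator, password: []const u8, buf: []u8) ![]const u8 {
        return scrypt.strHash(password, .{
            .allocator = allocator, // held by value now, not as *mem.Allocator
            .params = scrypt.Params.interactive,
            .encoding = .phc,
        }, buf);
    }
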
diff --git a/lib/std/cstr.zig b/lib/std/cstr.zig
index 64beb378d991..068fc419ac57 100644
--- a/lib/std/cstr.zig
+++ b/lib/std/cstr.zig
@@ -33,7 +33,7 @@ fn testCStrFnsImpl() !void {
 
 /// Returns a mutable, null-terminated slice with the same length as `slice`.
 /// Caller owns the returned memory.
-pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![:0]u8 {
+pub fn addNullByte(allocator: mem.Allocator, slice: []const u8) ![:0]u8 {
     const result = try allocator.alloc(u8, slice.len + 1);
     mem.copy(u8, result, slice);
     result[slice.len] = 0;
@@ -48,13 +48,13 @@ test "addNullByte" {
 }
 
 pub const NullTerminated2DArray = struct {
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     byte_count: usize,
     ptr: ?[*:null]?[*:0]u8,
 
     /// Takes N lists of strings, concatenates the lists together, and adds a null terminator
     /// Caller must deinit result
-    pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
+    pub fn fromSlices(allocator: mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
         var new_len: usize = 1; // 1 for the list null
         var byte_count: usize = 0;
         for (slices) |slice| {
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 531872581aa1..b6990d675dc1 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -29,7 +29,7 @@ pub const LineInfo = struct {
     line: u64,
     column: u64,
     file_name: []const u8,
-    allocator: ?*mem.Allocator,
+    allocator: ?mem.Allocator,
 
     pub fn deinit(self: LineInfo) void {
         const allocator = self.allocator orelse return;
@@ -339,7 +339,7 @@ const RESET = "\x1b[0m";
 pub fn writeStackTrace(
     stack_trace: std.builtin.StackTrace,
     out_stream: anytype,
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     debug_info: *DebugInfo,
     tty_config: TTY.Config,
 ) !void {
@@ -662,7 +662,7 @@ pub const OpenSelfDebugInfoError = error{
 };
 
 /// TODO resources https://github.com/ziglang/zig/issues/4353
-pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
+pub fn openSelfDebugInfo(allocator: mem.Allocator) anyerror!DebugInfo {
     nosuspend {
         if (builtin.strip_debug_info)
             return error.MissingDebugInfo;
@@ -688,7 +688,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
 /// it themselves, even on error.
 /// TODO resources https://github.com/ziglang/zig/issues/4353
 /// TODO it's weird to take ownership even on error, rework this code.
-fn readCoffDebugInfo(allocator: *mem.Allocator, coff_file: File) !ModuleDebugInfo {
+fn readCoffDebugInfo(allocator: mem.Allocator, coff_file: File) !ModuleDebugInfo {
     nosuspend {
         errdefer coff_file.close();
 
@@ -755,7 +755,7 @@ fn chopSlice(ptr: []const u8, offset: u64, size: u64) ![]const u8 {
 /// it themselves, even on error.
 /// TODO resources https://github.com/ziglang/zig/issues/4353
 /// TODO it's weird to take ownership even on error, rework this code.
-pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugInfo {
+pub fn readElfDebugInfo(allocator: mem.Allocator, elf_file: File) !ModuleDebugInfo {
     nosuspend {
         const mapped_mem = try mapWholeFile(elf_file);
         const hdr = @ptrCast(*const elf.Ehdr, &mapped_mem[0]);
@@ -827,7 +827,7 @@ pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugI
 /// This takes ownership of macho_file: users of this function should not close
 /// it themselves, even on error.
 /// TODO it's weird to take ownership even on error, rework this code.
-fn readMachODebugInfo(allocator: *mem.Allocator, macho_file: File) !ModuleDebugInfo {
+fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugInfo {
     const mapped_mem = try mapWholeFile(macho_file);
 
     const hdr = @ptrCast(
@@ -1025,10 +1025,10 @@ fn mapWholeFile(file: File) ![]align(mem.page_size) const u8 {
 }
 
 pub const DebugInfo = struct {
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     address_map: std.AutoHashMap(usize, *ModuleDebugInfo),
 
-    pub fn init(allocator: *mem.Allocator) DebugInfo {
+    pub fn init(allocator: mem.Allocator) DebugInfo {
         return DebugInfo{
             .allocator = allocator,
             .address_map = std.AutoHashMap(usize, *ModuleDebugInfo).init(allocator),
@@ -1278,7 +1278,7 @@ pub const ModuleDebugInfo = switch (native_os) {
             addr_table: std.StringHashMap(u64),
         };
 
-        pub fn allocator(self: @This()) *mem.Allocator {
+        pub fn allocator(self: @This()) mem.Allocator {
             return self.ofiles.allocator;
         }
 
@@ -1470,7 +1470,7 @@ pub const ModuleDebugInfo = switch (native_os) {
         debug_data: PdbOrDwarf,
         coff: *coff.Coff,
 
-        pub fn allocator(self: @This()) *mem.Allocator {
+        pub fn allocator(self: @This()) mem.Allocator {
             return self.coff.allocator;
         }
 
@@ -1560,14 +1560,15 @@ fn getSymbolFromDwarf(address: u64, di: *DW.DwarfInfo) !SymbolInfo {
 }
 
 /// TODO multithreaded awareness
-var debug_info_allocator: ?*mem.Allocator = null;
+var debug_info_allocator: ?mem.Allocator = null;
 var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
-fn getDebugInfoAllocator() *mem.Allocator {
+fn getDebugInfoAllocator() mem.Allocator {
     if (debug_info_allocator) |a| return a;
 
     debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
-    debug_info_allocator = &debug_info_arena_allocator.allocator;
-    return &debug_info_arena_allocator.allocator;
+    const allocator = debug_info_arena_allocator.getAllocator();
+    debug_info_allocator = allocator;
+    return allocator;
 }
 
 /// Whether or not the current target can print useful debug information when a segfault occurs.
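
The `getDebugInfoAllocator` rewrite above shows a pattern the value interface enables: the `Allocator` returned by `getAllocator()` can be copied into a global and handed out later, where the old code had to return `&debug_info_arena_allocator.allocator`. The same pattern in isolation (names here are illustrative, not part of std):

    const std = @import("std");

    var cached_allocator: ?std.mem.Allocator = null;
    var arena_state: std.heap.ArenaAllocator = undefined;

    fn lazyArenaAllocator() std.mem.Allocator {
        if (cached_allocator) |a| return a;
        arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        // The interface is a small value, so caching a copy is safe as long
        // as arena_state itself stays at a fixed address (a global here).
        const a = arena_state.getAllocator();
        cached_allocator = a;
        return a;
    }
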
diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig
index 26031be662c3..eb204d15eef0 100644
--- a/lib/std/dwarf.zig
+++ b/lib/std/dwarf.zig
@@ -466,7 +466,7 @@ fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool)
 }
 
 // TODO the nosuspends here are workarounds
-fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
+fn readAllocBytes(allocator: mem.Allocator, in_stream: anytype, size: usize) ![]u8 {
     const buf = try allocator.alloc(u8, size);
     errdefer allocator.free(buf);
     if ((try nosuspend in_stream.read(buf)) < size) return error.EndOfFile;
@@ -481,18 +481,18 @@ fn readAddress(in_stream: anytype, endian: std.builtin.Endian, is_64: bool) !u64
         @as(u64, try in_stream.readInt(u32, endian));
 }
 
-fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: mem.Allocator, in_stream: anytype, size: usize) !FormValue {
     const buf = try readAllocBytes(allocator, in_stream, size);
     return FormValue{ .Block = buf };
 }
 
 // TODO the nosuspends here are workarounds
-fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
     const block_len = try nosuspend in_stream.readVarInt(usize, endian, size);
     return parseFormValueBlockLen(allocator, in_stream, block_len);
 }
 
-fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
+fn parseFormValueConstant(allocator: mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
     _ = allocator;
     // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
     // `nosuspend` should be removed from all the function calls once it is fixed.
@@ -520,7 +520,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:
 }
 
 // TODO the nosuspends here are workarounds
-fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
+fn parseFormValueRef(allocator: mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
     _ = allocator;
     return FormValue{
         .Ref = switch (size) {
@@ -535,7 +535,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.
 }
 
 // TODO the nosuspends here are workarounds
-fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
+fn parseFormValue(allocator: mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
     return switch (form_id) {
         FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
         FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
@@ -604,7 +604,7 @@ pub const DwarfInfo = struct {
     compile_unit_list: ArrayList(CompileUnit) = undefined,
     func_list: ArrayList(Func) = undefined,
 
-    pub fn allocator(self: DwarfInfo) *mem.Allocator {
+    pub fn allocator(self: DwarfInfo) mem.Allocator {
         return self.abbrev_table_list.allocator;
     }
 
@@ -1092,7 +1092,7 @@ pub const DwarfInfo = struct {
 /// the DwarfInfo fields before calling. These fields can be left undefined:
 /// * abbrev_table_list
 /// * compile_unit_list
-pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void {
+pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
     di.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator);
     di.compile_unit_list = ArrayList(CompileUnit).init(allocator);
     di.func_list = ArrayList(Func).init(allocator);
diff --git a/lib/std/event/group.zig b/lib/std/event/group.zig
index 599e8d9496ba..85eeeaf2b7c3 100644
--- a/lib/std/event/group.zig
+++ b/lib/std/event/group.zig
@@ -15,7 +15,7 @@ pub fn Group(comptime ReturnType: type) type {
         frame_stack: Stack,
         alloc_stack: AllocStack,
         lock: Lock,
-        allocator: *Allocator,
+        allocator: Allocator,
 
         const Self = @This();
 
@@ -31,7 +31,7 @@ pub fn Group(comptime ReturnType: type) type {
             handle: anyframe->ReturnType,
         };
 
-        pub fn init(allocator: *Allocator) Self {
+        pub fn init(allocator: Allocator) Self {
             return Self{
                 .frame_stack = Stack.init(),
                 .alloc_stack = AllocStack.init(),
@@ -127,7 +127,7 @@ test "std.event.Group" {
 
     _ = async testGroup(std.heap.page_allocator);
 }
-fn testGroup(allocator: *Allocator) callconv(.Async) void {
+fn testGroup(allocator: Allocator) callconv(.Async) void {
     var count: usize = 0;
     var group = Group(void).init(allocator);
     var sleep_a_little_frame = async sleepALittle(&count);
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 042c8bc3ccbb..413b23cd48af 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -727,7 +727,7 @@ pub const Loop = struct {
     /// with `allocator` and freed when the function returns.
     /// `func` must return void and it can be an async function.
     /// Yields to the event loop, running the function on the next tick.
-    pub fn runDetached(self: *Loop, alloc: *mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
+    pub fn runDetached(self: *Loop, alloc: mem.Allocator, comptime func: anytype, args: anytype) error{OutOfMemory}!void {
         if (!std.io.is_async) @compileError("Can't use runDetached in non-async mode!");
         if (@TypeOf(@call(.{}, func, args)) != void) {
             @compileError("`func` must not have a return value");
@@ -735,7 +735,7 @@ pub const Loop = struct {
 
         const Wrapper = struct {
             const Args = @TypeOf(args);
-            fn run(func_args: Args, loop: *Loop, allocator: *mem.Allocator) void {
+            fn run(func_args: Args, loop: *Loop, allocator: mem.Allocator) void {
                 loop.beginOneEvent();
                 loop.yield();
                 @call(.{}, func, func_args); // compile error when called with non-void ret type
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index fd42842a3a97..c19330d5a922 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -226,7 +226,7 @@ test "std.event.RwLock" {
     const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
     try testing.expectEqualSlices(i32, expected_result, shared_test_data);
 }
-fn testLock(allocator: *Allocator, lock: *RwLock) callconv(.Async) void {
+fn testLock(allocator: Allocator, lock: *RwLock) callconv(.Async) void {
     var read_nodes: [100]Loop.NextTickNode = undefined;
     for (read_nodes) |*read_node| {
         const frame = allocator.create(@Frame(readRunner)) catch @panic("memory");
diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig
index d5b6285c00d3..b7c8f761d3dd 100644
--- a/lib/std/fifo.zig
+++ b/lib/std/fifo.zig
@@ -33,7 +33,7 @@ pub fn LinearFifo(
     };
 
     return struct {
-        allocator: if (buffer_type == .Dynamic) *Allocator else void,
+        allocator: if (buffer_type == .Dynamic) Allocator else void,
         buf: if (buffer_type == .Static) [buffer_type.Static]T else []T,
         head: usize,
         count: usize,
@@ -69,7 +69,7 @@ pub fn LinearFifo(
                 }
             },
             .Dynamic => struct {
-                pub fn init(allocator: *Allocator) Self {
+                pub fn init(allocator: Allocator) Self {
                     return .{
                         .allocator = allocator,
                         .buf = &[_]T{},
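
A quick sketch of the `.Dynamic` fifo after this change; `init` now takes the interface by value:

    const std = @import("std");

    test "LinearFifo(.Dynamic) with a value allocator" {
        var fifo = std.fifo.LinearFifo(u8, .Dynamic).init(std.testing.allocator);
        defer fifo.deinit();

        try fifo.write("abc");
        var out: [3]u8 = undefined;
        const n = fifo.read(&out);
        try std.testing.expectEqual(@as(usize, 3), n);
        try std.testing.expectEqualStrings("abc", out[0..n]);
    }
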
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 24f5daa09592..97dfcc78ba30 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -1803,7 +1803,7 @@ pub fn count(comptime fmt: []const u8, args: anytype) u64 {
 
 pub const AllocPrintError = error{OutOfMemory};
 
-pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
+pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![]u8 {
     const size = math.cast(usize, count(fmt, args)) catch |err| switch (err) {
         // Output too long. Can't possibly allocate enough memory to display it.
         error.Overflow => return error.OutOfMemory,
@@ -1816,7 +1816,7 @@ pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: any
 
 pub const allocPrint0 = @compileError("deprecated; use allocPrintZ");
 
-pub fn allocPrintZ(allocator: *mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
+pub fn allocPrintZ(allocator: mem.Allocator, comptime fmt: []const u8, args: anytype) AllocPrintError![:0]u8 {
     const result = try allocPrint(allocator, fmt ++ "\x00", args);
     return result[0 .. result.len - 1 :0];
 }
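
And the corresponding call-site shape for the `allocPrint` family, as a small test sketch:

    const std = @import("std");

    test "allocPrint takes the allocator by value" {
        const allocator = std.testing.allocator;
        const s = try std.fmt.allocPrint(allocator, "{d} + {d} = {d}", .{ 2, 2, 4 });
        defer allocator.free(s);
        try std.testing.expectEqualStrings("2 + 2 = 4", s);
    }
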
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index c10ded3bdc2c..4d900d2e6756 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -64,7 +64,7 @@ pub const need_async_thread = std.io.is_async and switch (builtin.os.tag) {
 };
 
 /// TODO remove the allocator requirement from this API
-pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
     if (cwd().symLink(existing_path, new_path, .{})) {
         return;
     } else |err| switch (err) {
@@ -875,7 +875,7 @@ pub const Dir = struct {
     /// Must call `Walker.deinit` when done.
     /// The order of returned file system entries is undefined.
     /// `self` will not be closed after walking it.
-    pub fn walk(self: Dir, allocator: *Allocator) !Walker {
+    pub fn walk(self: Dir, allocator: Allocator) !Walker {
         var name_buffer = std.ArrayList(u8).init(allocator);
         errdefer name_buffer.deinit();
 
@@ -1393,7 +1393,7 @@ pub const Dir = struct {
 
     /// Same as `Dir.realpath` except caller must free the returned memory.
     /// See also `Dir.realpath`.
-    pub fn realpathAlloc(self: Dir, allocator: *Allocator, pathname: []const u8) ![]u8 {
+    pub fn realpathAlloc(self: Dir, allocator: Allocator, pathname: []const u8) ![]u8 {
         // Use of MAX_PATH_BYTES here is valid as the realpath function does not
         // have a variant that takes an arbitrary-size buffer.
         // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
@@ -1804,7 +1804,7 @@ pub const Dir = struct {
 
     /// On success, caller owns returned buffer.
     /// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
-    pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
+    pub fn readFileAlloc(self: Dir, allocator: mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
         return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
     }
 
@@ -1815,7 +1815,7 @@ pub const Dir = struct {
     /// Allows specifying alignment and a sentinel value.
     pub fn readFileAllocOptions(
         self: Dir,
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         file_path: []const u8,
         max_bytes: usize,
         size_hint: ?usize,
@@ -2464,7 +2464,7 @@ pub const SelfExePathError = os.ReadLinkError || os.SysCtlError || os.RealPathEr
 
 /// `selfExePath` except allocates the result on the heap.
 /// Caller owns returned memory.
-pub fn selfExePathAlloc(allocator: *Allocator) ![]u8 {
+pub fn selfExePathAlloc(allocator: Allocator) ![]u8 {
     // Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
     // system, readlink will completely fail to return a result larger than
     // PATH_MAX even if given a sufficiently large buffer. This makes it
@@ -2573,7 +2573,7 @@ pub fn selfExePathW() [:0]const u16 {
 
 /// `selfExeDirPath` except allocates the result on the heap.
 /// Caller owns returned memory.
-pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
+pub fn selfExeDirPathAlloc(allocator: Allocator) ![]u8 {
     // Use of MAX_PATH_BYTES here is justified as, at least on one tested Linux
     // system, readlink will completely fail to return a result larger than
     // PATH_MAX even if given a sufficiently large buffer. This makes it
@@ -2596,7 +2596,7 @@ pub fn selfExeDirPath(out_buffer: []u8) SelfExePathError![]const u8 {
 
 /// `realpath`, except caller must free the returned memory.
 /// See also `Dir.realpath`.
-pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
+pub fn realpathAlloc(allocator: Allocator, pathname: []const u8) ![]u8 {
     // Use of MAX_PATH_BYTES here is valid as the realpath function does not
     // have a variant that takes an arbitrary-size buffer.
     // TODO(#4812): Consider reimplementing realpath or using the POSIX.1-2008
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index 268de8f3c814..6fa46579fd49 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -420,7 +420,7 @@ pub const File = struct {
     /// Reads all the bytes from the current position to the end of the file.
     /// On success, caller owns returned buffer.
     /// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
-    pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 {
+    pub fn readToEndAlloc(self: File, allocator: mem.Allocator, max_bytes: usize) ![]u8 {
         return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
     }
 
@@ -432,7 +432,7 @@ pub const File = struct {
     /// Allows specifying alignment and a sentinel value.
     pub fn readToEndAllocOptions(
         self: File,
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         max_bytes: usize,
         size_hint: ?usize,
         comptime alignment: u29,
diff --git a/lib/std/fs/get_app_data_dir.zig b/lib/std/fs/get_app_data_dir.zig
index 2501a5194b8b..e2a9c5438f7e 100644
--- a/lib/std/fs/get_app_data_dir.zig
+++ b/lib/std/fs/get_app_data_dir.zig
@@ -12,7 +12,7 @@ pub const GetAppDataDirError = error{
 
 /// Caller owns returned memory.
 /// TODO determine if we can remove the allocator requirement
-pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
+pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
     switch (builtin.os.tag) {
         .windows => {
             var dir_path_ptr: [*:0]u16 = undefined;
diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig
index 6372757d372e..323f974255d7 100644
--- a/lib/std/fs/path.zig
+++ b/lib/std/fs/path.zig
@@ -35,7 +35,7 @@ pub fn isSep(byte: u8) bool {
 
 /// This is different from mem.join in that the separator will not be repeated if
 /// it is found at the end or beginning of a pair of consecutive paths.
-fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
+fn joinSepMaybeZ(allocator: Allocator, separator: u8, sepPredicate: fn (u8) bool, paths: []const []const u8, zero: bool) ![]u8 {
     if (paths.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
 
     // Find first non-empty path index.
@@ -99,13 +99,13 @@ fn joinSepMaybeZ(allocator: *Allocator, separator: u8, sepPredicate: fn (u8) boo
 
 /// Naively combines a series of paths with the native path separator.
 /// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn join(allocator: Allocator, paths: []const []const u8) ![]u8 {
     return joinSepMaybeZ(allocator, sep, isSep, paths, false);
 }
 
 /// Naively combines a series of paths with the native path separator and null terminator.
 /// Allocates memory for the result, which must be freed by the caller.
-pub fn joinZ(allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
+pub fn joinZ(allocator: Allocator, paths: []const []const u8) ![:0]u8 {
     const out = try joinSepMaybeZ(allocator, sep, isSep, paths, true);
     return out[0 .. out.len - 1 :0];
 }
@@ -445,7 +445,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
 }
 
 /// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
-pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 {
     if (native_os == .windows) {
         return resolveWindows(allocator, paths);
     } else {
@@ -461,7 +461,7 @@ pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
 /// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
 /// Note: all usage of this function should be audited due to the existence of symlinks.
 /// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
     if (paths.len == 0) {
         assert(native_os == .windows); // resolveWindows called on non-Windows can't use getCwd
         return process.getCwdAlloc(allocator);
@@ -647,7 +647,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
 /// If all paths are relative it uses the current working directory as a starting point.
 /// Note: all usage of this function should be audited due to the existence of symlinks.
 /// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 {
     if (paths.len == 0) {
         assert(native_os != .windows); // resolvePosix called on Windows can't use getCwd
         return process.getCwdAlloc(allocator);
@@ -1058,7 +1058,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) !void {
 /// resolve to the same path (after calling `resolve` on each), a zero-length
 /// string is returned.
 /// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
-pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relative(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
     if (native_os == .windows) {
         return relativeWindows(allocator, from, to);
     } else {
@@ -1066,7 +1066,7 @@ pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
     }
 }
 
-pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
     const resolved_from = try resolveWindows(allocator, &[_][]const u8{from});
     defer allocator.free(resolved_from);
 
@@ -1139,7 +1139,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
     return [_]u8{};
 }
 
-pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
     const resolved_from = try resolvePosix(allocator, &[_][]const u8{from});
     defer allocator.free(resolved_from);
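
A usage sketch for the refactored path helpers; `buildTmpPath` is a hypothetical helper, and ownership follows the doc comments above (the caller frees both results):

    const std = @import("std");

    fn buildTmpPath(allocator: std.mem.Allocator, root: []const u8) ![]u8 {
        const joined = try std.fs.path.join(allocator, &[_][]const u8{ root, "zig-cache", "tmp" });
        defer allocator.free(joined);
        // resolve also takes the interface by value now.
        return std.fs.path.resolve(allocator, &[_][]const u8{joined});
    }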
 
diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig
index f2b584d6d465..437ff5620d7c 100644
--- a/lib/std/fs/test.zig
+++ b/lib/std/fs/test.zig
@@ -52,9 +52,11 @@ test "accessAbsolute" {
 
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
+    const allocator = arena.getAllocator();
+
     const base_path = blk: {
-        const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
-        break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+        const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+        break :blk try fs.realpathAlloc(allocator, relative_path);
     };
 
     try fs.accessAbsolute(base_path, .{});
@@ -69,9 +71,11 @@ test "openDirAbsolute" {
     try tmp.dir.makeDir("subdir");
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
+    const allocator = arena.getAllocator();
+
     const base_path = blk: {
-        const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
-        break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+        const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" });
+        break :blk try fs.realpathAlloc(allocator, relative_path);
     };
 
     {
@@ -80,8 +84,8 @@ test "openDirAbsolute" {
     }
 
     for ([_][]const u8{ ".", ".." }) |sub_path| {
-        const dir_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, sub_path });
-        defer arena.allocator.free(dir_path);
+        const dir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, sub_path });
+        defer allocator.free(dir_path);
         var dir = try fs.openDirAbsolute(dir_path, .{});
         defer dir.close();
     }
@@ -107,12 +111,12 @@ test "readLinkAbsolute" {
     // Get base abs path
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
+    const allocator = arena.getAllocator();
 
     const base_path = blk: {
-        const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
-        break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+        const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+        break :blk try fs.realpathAlloc(allocator, relative_path);
     };
-    const allocator = &arena.allocator;
 
     {
         const target_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "file.txt" });
@@ -158,15 +162,16 @@ test "Dir.Iterator" {
 
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
+    const allocator = arena.getAllocator();
 
-    var entries = std.ArrayList(Dir.Entry).init(&arena.allocator);
+    var entries = std.ArrayList(Dir.Entry).init(allocator);
 
     // Create iterator.
     var iter = tmp_dir.dir.iterate();
     while (try iter.next()) |entry| {
         // We cannot just store `entry` because, on Windows, we re-use the name buffer,
         // which means we would actually share the `name` pointer between entries!
-        const name = try arena.allocator.dupe(u8, entry.name);
+        const name = try allocator.dupe(u8, entry.name);
         try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
     }
 
@@ -202,25 +207,26 @@ test "Dir.realpath smoke test" {
 
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
+    const allocator = arena.getAllocator();
 
     const base_path = blk: {
-        const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
-        break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+        const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
+        break :blk try fs.realpathAlloc(allocator, relative_path);
     };
 
     // First, test non-alloc version
     {
         var buf1: [fs.MAX_PATH_BYTES]u8 = undefined;
         const file_path = try tmp_dir.dir.realpath("test_file", buf1[0..]);
-        const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+        const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
 
         try testing.expect(mem.eql(u8, file_path, expected_path));
     }
 
     // Next, test alloc version
     {
-        const file_path = try tmp_dir.dir.realpathAlloc(&arena.allocator, "test_file");
-        const expected_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "test_file" });
+        const file_path = try tmp_dir.dir.realpathAlloc(allocator, "test_file");
+        const expected_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "test_file" });
 
         try testing.expect(mem.eql(u8, file_path, expected_path));
     }
@@ -476,11 +482,11 @@ test "renameAbsolute" {
     // Get base abs path
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
-    const allocator = &arena.allocator;
+    const allocator = arena.getAllocator();
 
     const base_path = blk: {
-        const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
-        break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+        const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] });
+        break :blk try fs.realpathAlloc(allocator, relative_path);
     };
 
     try testing.expectError(error.FileNotFound, fs.renameAbsolute(
@@ -987,11 +993,11 @@ test ". and .. in absolute functions" {
 
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
-    const allocator = &arena.allocator;
+    const allocator = arena.getAllocator();
 
     const base_path = blk: {
-        const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
-        break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+        const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+        break :blk try fs.realpathAlloc(allocator, relative_path);
     };
 
     const subdir_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "./subdir" });
diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig
index 528ccfc0f15c..1a033653d3d5 100644
--- a/lib/std/fs/wasi.zig
+++ b/lib/std/fs/wasi.zig
@@ -80,7 +80,7 @@ pub const PreopenList = struct {
     pub const Error = error{ OutOfMemory, Overflow } || os.UnexpectedError;
 
     /// Deinitialize with `deinit`.
-    pub fn init(allocator: *Allocator) Self {
+    pub fn init(allocator: Allocator) Self {
         return Self{ .buffer = InnerList.init(allocator) };
     }
 
diff --git a/lib/std/fs/watch.zig b/lib/std/fs/watch.zig
index 56544162c8a8..c103925bdd8c 100644
--- a/lib/std/fs/watch.zig
+++ b/lib/std/fs/watch.zig
@@ -30,7 +30,7 @@ pub fn Watch(comptime V: type) type {
     return struct {
         channel: event.Channel(Event.Error!Event),
         os_data: OsData,
-        allocator: *Allocator,
+        allocator: Allocator,
 
         const OsData = switch (builtin.os.tag) {
             // TODO https://github.com/ziglang/zig/issues/3778
@@ -96,7 +96,7 @@ pub fn Watch(comptime V: type) type {
             pub const Error = WatchEventError;
         };
 
-        pub fn init(allocator: *Allocator, event_buf_count: usize) !*Self {
+        pub fn init(allocator: Allocator, event_buf_count: usize) !*Self {
             const self = try allocator.create(Self);
             errdefer allocator.destroy(self);
 
@@ -648,7 +648,7 @@ test "write a file, watch it, write it again, delete it" {
     return testWriteWatchWriteDelete(std.testing.allocator);
 }
 
-fn testWriteWatchWriteDelete(allocator: *Allocator) !void {
+fn testWriteWatchWriteDelete(allocator: Allocator) !void {
     const file_path = try std.fs.path.join(allocator, &[_][]const u8{ test_tmp_dir, "file.txt" });
     defer allocator.free(file_path);
 
diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig
index 5663bed2491e..22fd6526f475 100644
--- a/lib/std/hash/auto_hash.zig
+++ b/lib/std/hash/auto_hash.zig
@@ -309,7 +309,7 @@ test "hash struct deep" {
 
         const Self = @This();
 
-        pub fn init(allocator: *mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
+        pub fn init(allocator: mem.Allocator, a_: u32, b_: u16, c_: bool) !Self {
             const ptr = try allocator.create(bool);
             ptr.* = c_;
             return Self{ .a = a_, .b = b_, .c = ptr };
diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig
index c145fcbae299..5b278ca0b11e 100644
--- a/lib/std/hash/benchmark.zig
+++ b/lib/std/hash/benchmark.zig
@@ -165,7 +165,7 @@ pub fn main() !void {
 
     var buffer: [1024]u8 = undefined;
     var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
-    const args = try std.process.argsAlloc(&fixed.allocator);
+    const args = try std.process.argsAlloc(fixed.getAllocator());
 
     var filter: ?[]u8 = "";
     var count: usize = mode(128 * MiB);
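
The same `getAllocator()` accessor applies to `FixedBufferAllocator`, as in the benchmark hunks above; a standalone sketch:

    const std = @import("std");

    test "FixedBufferAllocator hands out a value interface" {
        var buffer: [64]u8 = undefined;
        var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
        const allocator = fixed.getAllocator(); // was: &fixed.allocator

        const slice = try allocator.alloc(u8, 16);
        try std.testing.expectEqual(@as(usize, 16), slice.len);
    }
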
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index cd23ccd39e20..5356bbff1a2f 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -363,7 +363,7 @@ pub fn HashMap(
     comptime verifyContext(Context, K, K, u64);
     return struct {
         unmanaged: Unmanaged,
-        allocator: *Allocator,
+        allocator: Allocator,
         ctx: Context,
 
         /// The type of the unmanaged hash map underlying this wrapper
@@ -390,7 +390,7 @@ pub fn HashMap(
         /// Create a managed hash map with an empty context.
         /// If the context is not zero-sized, you must use
         /// initContext(allocator, ctx) instead.
-        pub fn init(allocator: *Allocator) Self {
+        pub fn init(allocator: Allocator) Self {
             if (@sizeOf(Context) != 0) {
                 @compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
             }
@@ -402,7 +402,7 @@ pub fn HashMap(
         }
 
         /// Create a managed hash map with a context
-        pub fn initContext(allocator: *Allocator, ctx: Context) Self {
+        pub fn initContext(allocator: Allocator, ctx: Context) Self {
             return .{
                 .unmanaged = .{},
                 .allocator = allocator,
@@ -636,7 +636,7 @@ pub fn HashMap(
         }
 
         /// Creates a copy of this map, using a specified allocator
-        pub fn cloneWithAllocator(self: Self, new_allocator: *Allocator) !Self {
+        pub fn cloneWithAllocator(self: Self, new_allocator: Allocator) !Self {
             var other = try self.unmanaged.cloneContext(new_allocator, self.ctx);
             return other.promoteContext(new_allocator, self.ctx);
         }
@@ -650,7 +650,7 @@ pub fn HashMap(
         /// Creates a copy of this map, using a specified allocator and context.
         pub fn cloneWithAllocatorAndContext(
             self: Self,
-            new_allocator: *Allocator,
+            new_allocator: Allocator,
             new_ctx: anytype,
         ) !HashMap(K, V, @TypeOf(new_ctx), max_load_percentage) {
             var other = try self.unmanaged.cloneContext(new_allocator, new_ctx);
@@ -841,13 +841,13 @@ pub fn HashMapUnmanaged(
 
         pub const Managed = HashMap(K, V, Context, max_load_percentage);
 
-        pub fn promote(self: Self, allocator: *Allocator) Managed {
+        pub fn promote(self: Self, allocator: Allocator) Managed {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call promoteContext instead.");
             return promoteContext(self, allocator, undefined);
         }
 
-        pub fn promoteContext(self: Self, allocator: *Allocator, ctx: Context) Managed {
+        pub fn promoteContext(self: Self, allocator: Allocator, ctx: Context) Managed {
             return .{
                 .unmanaged = self,
                 .allocator = allocator,
@@ -859,7 +859,7 @@ pub fn HashMapUnmanaged(
             return size * 100 < max_load_percentage * cap;
         }
 
-        pub fn deinit(self: *Self, allocator: *Allocator) void {
+        pub fn deinit(self: *Self, allocator: Allocator) void {
             self.deallocate(allocator);
             self.* = undefined;
         }
@@ -872,20 +872,20 @@ pub fn HashMapUnmanaged(
 
         pub const ensureCapacity = @compileError("deprecated; call `ensureUnusedCapacity` or `ensureTotalCapacity`");
 
-        pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void {
+        pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_size: Size) !void {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead.");
             return ensureTotalCapacityContext(self, allocator, new_size, undefined);
         }
-        pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void {
+        pub fn ensureTotalCapacityContext(self: *Self, allocator: Allocator, new_size: Size, ctx: Context) !void {
             if (new_size > self.size)
                 try self.growIfNeeded(allocator, new_size - self.size, ctx);
         }
 
-        pub fn ensureUnusedCapacity(self: *Self, allocator: *Allocator, additional_size: Size) !void {
+        pub fn ensureUnusedCapacity(self: *Self, allocator: Allocator, additional_size: Size) !void {
             return ensureUnusedCapacityContext(self, allocator, additional_size, undefined);
         }
-        pub fn ensureUnusedCapacityContext(self: *Self, allocator: *Allocator, additional_size: Size, ctx: Context) !void {
+        pub fn ensureUnusedCapacityContext(self: *Self, allocator: Allocator, additional_size: Size, ctx: Context) !void {
             return ensureTotalCapacityContext(self, allocator, self.count() + additional_size, ctx);
         }
 
@@ -897,7 +897,7 @@ pub fn HashMapUnmanaged(
             }
         }
 
-        pub fn clearAndFree(self: *Self, allocator: *Allocator) void {
+        pub fn clearAndFree(self: *Self, allocator: Allocator) void {
             self.deallocate(allocator);
             self.size = 0;
             self.available = 0;
@@ -962,12 +962,12 @@ pub fn HashMapUnmanaged(
         }
 
         /// Insert an entry in the map. Assumes it is not already present.
-        pub fn putNoClobber(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+        pub fn putNoClobber(self: *Self, allocator: Allocator, key: K, value: V) !void {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putNoClobberContext instead.");
             return self.putNoClobberContext(allocator, key, value, undefined);
         }
-        pub fn putNoClobberContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+        pub fn putNoClobberContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
             assert(!self.containsContext(key, ctx));
             try self.growIfNeeded(allocator, 1, ctx);
 
@@ -1021,12 +1021,12 @@ pub fn HashMapUnmanaged(
         }
 
         /// Inserts a new `Entry` into the hash map, returning the previous one, if any.
-        pub fn fetchPut(self: *Self, allocator: *Allocator, key: K, value: V) !?KV {
+        pub fn fetchPut(self: *Self, allocator: Allocator, key: K, value: V) !?KV {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call fetchPutContext instead.");
             return self.fetchPutContext(allocator, key, value, undefined);
         }
-        pub fn fetchPutContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !?KV {
+        pub fn fetchPutContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !?KV {
             const gop = try self.getOrPutContext(allocator, key, ctx);
             var result: ?KV = null;
             if (gop.found_existing) {
@@ -1157,12 +1157,12 @@ pub fn HashMapUnmanaged(
         }
 
         /// Insert an entry if the associated key is not already present, otherwise update preexisting value.
-        pub fn put(self: *Self, allocator: *Allocator, key: K, value: V) !void {
+        pub fn put(self: *Self, allocator: Allocator, key: K, value: V) !void {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call putContext instead.");
             return self.putContext(allocator, key, value, undefined);
         }
-        pub fn putContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !void {
+        pub fn putContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !void {
             const result = try self.getOrPutContext(allocator, key, ctx);
             result.value_ptr.* = value;
         }
@@ -1231,24 +1231,24 @@ pub fn HashMapUnmanaged(
             return null;
         }
 
-        pub fn getOrPut(self: *Self, allocator: *Allocator, key: K) !GetOrPutResult {
+        pub fn getOrPut(self: *Self, allocator: Allocator, key: K) !GetOrPutResult {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
             return self.getOrPutContext(allocator, key, undefined);
         }
-        pub fn getOrPutContext(self: *Self, allocator: *Allocator, key: K, ctx: Context) !GetOrPutResult {
+        pub fn getOrPutContext(self: *Self, allocator: Allocator, key: K, ctx: Context) !GetOrPutResult {
             const gop = try self.getOrPutContextAdapted(allocator, key, ctx, ctx);
             if (!gop.found_existing) {
                 gop.key_ptr.* = key;
             }
             return gop;
         }
-        pub fn getOrPutAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
+        pub fn getOrPutAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype) !GetOrPutResult {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContextAdapted instead.");
             return self.getOrPutContextAdapted(allocator, key, key_ctx, undefined);
         }
-        pub fn getOrPutContextAdapted(self: *Self, allocator: *Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
+        pub fn getOrPutContextAdapted(self: *Self, allocator: Allocator, key: anytype, key_ctx: anytype, ctx: Context) !GetOrPutResult {
             self.growIfNeeded(allocator, 1, ctx) catch |err| {
                 // If allocation fails, try to do the lookup anyway.
                 // If we find an existing item, we can return it.
@@ -1341,12 +1341,12 @@ pub fn HashMapUnmanaged(
             };
         }
 
-        pub fn getOrPutValue(self: *Self, allocator: *Allocator, key: K, value: V) !Entry {
+        pub fn getOrPutValue(self: *Self, allocator: Allocator, key: K, value: V) !Entry {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutValueContext instead.");
             return self.getOrPutValueContext(allocator, key, value, undefined);
         }
-        pub fn getOrPutValueContext(self: *Self, allocator: *Allocator, key: K, value: V, ctx: Context) !Entry {
+        pub fn getOrPutValueContext(self: *Self, allocator: Allocator, key: K, value: V, ctx: Context) !Entry {
             const res = try self.getOrPutAdapted(allocator, key, ctx);
             if (!res.found_existing) {
                 res.key_ptr.* = key;
@@ -1403,18 +1403,18 @@ pub fn HashMapUnmanaged(
             return @truncate(Size, max_load - self.available);
         }
 
-        fn growIfNeeded(self: *Self, allocator: *Allocator, new_count: Size, ctx: Context) !void {
+        fn growIfNeeded(self: *Self, allocator: Allocator, new_count: Size, ctx: Context) !void {
             if (new_count > self.available) {
                 try self.grow(allocator, capacityForSize(self.load() + new_count), ctx);
             }
         }
 
-        pub fn clone(self: Self, allocator: *Allocator) !Self {
+        pub fn clone(self: Self, allocator: Allocator) !Self {
             if (@sizeOf(Context) != 0)
                 @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
             return self.cloneContext(allocator, @as(Context, undefined));
         }
-        pub fn cloneContext(self: Self, allocator: *Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
+        pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) !HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
             var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){};
             if (self.size == 0)
                 return other;
@@ -1439,7 +1439,7 @@ pub fn HashMapUnmanaged(
             return other;
         }
 
-        fn grow(self: *Self, allocator: *Allocator, new_capacity: Size, ctx: Context) !void {
+        fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) !void {
             @setCold(true);
             const new_cap = std.math.max(new_capacity, minimal_capacity);
             assert(new_cap > self.capacity());
@@ -1470,7 +1470,7 @@ pub fn HashMapUnmanaged(
             std.mem.swap(Self, self, &map);
         }
 
-        fn allocate(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
+        fn allocate(self: *Self, allocator: Allocator, new_capacity: Size) !void {
             const header_align = @alignOf(Header);
             const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K);
             const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V);
@@ -1503,7 +1503,7 @@ pub fn HashMapUnmanaged(
             self.metadata = @intToPtr([*]Metadata, metadata);
         }
 
-        fn deallocate(self: *Self, allocator: *Allocator) void {
+        fn deallocate(self: *Self, allocator: Allocator) void {
             if (self.metadata == null) return;
 
             const header_align = @alignOf(Header);
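
A sketch of what the unmanaged API looks like at a call site after this change; every allocating call threads the value `Allocator` through explicitly:

    const std = @import("std");

    test "HashMapUnmanaged threads the allocator through each call" {
        const allocator = std.testing.allocator;
        var map = std.AutoHashMapUnmanaged(u32, []const u8){};
        defer map.deinit(allocator);

        try map.put(allocator, 1, "one");
        try map.put(allocator, 2, "two");
        try std.testing.expectEqual(@as(u32, 2), map.count());
        try std.testing.expectEqualStrings("one", map.get(1).?);
    }
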
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index fcea90d751ee..c265fc240861 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -97,13 +97,12 @@ const CAllocator = struct {
     }
 
     fn alloc(
-        allocator: *Allocator,
+        _: *u1,
         len: usize,
         alignment: u29,
         len_align: u29,
         return_address: usize,
     ) error{OutOfMemory}![]u8 {
-        _ = allocator;
         _ = return_address;
         assert(len > 0);
         assert(std.math.isPowerOfTwo(alignment));
@@ -124,14 +123,13 @@ const CAllocator = struct {
     }
 
     fn resize(
-        allocator: *Allocator,
+        _: *u1,
         buf: []u8,
         buf_align: u29,
         new_len: usize,
         len_align: u29,
         return_address: usize,
     ) Allocator.Error!usize {
-        _ = allocator;
         _ = buf_align;
         _ = return_address;
         if (new_len == 0) {
@@ -154,10 +152,11 @@ const CAllocator = struct {
 /// Supports the full Allocator interface, including alignment, and exploiting
 /// `malloc_usable_size` if available. For an allocator that directly calls
 /// `malloc`/`free`, see `raw_c_allocator`.
-pub const c_allocator = &c_allocator_state;
-var c_allocator_state = Allocator{
-    .allocFn = CAllocator.alloc,
-    .resizeFn = CAllocator.resize,
+pub const c_allocator = blk: {
+    // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+    // allowing the use of `*void`, but it would still be ugly.
+    var tmp: u1 = 0;
+    break :blk Allocator.init(&tmp, CAllocator.alloc, CAllocator.resize);
 };
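
For reference, the pattern above relies on the new `Allocator.init(state_ptr, allocFn, resizeFn)` constructor this refactor introduces: the interface now carries a type-erased state pointer plus two function pointers, so stateless allocators need a dummy pointee until `*void` works. A minimal sketch of a stateless allocator under this scheme (illustrative only; `NullAllocator` is hypothetical):

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    const NullAllocator = struct {
        // Stateless, so a dummy u1 stands in for the state pointer.
        fn alloc(_: *u1, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
            _ = len;
            _ = ptr_align;
            _ = len_align;
            _ = ret_addr;
            return error.OutOfMemory; // never allocates
        }
        fn resize(_: *u1, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
            _ = buf;
            _ = buf_align;
            _ = len_align;
            _ = ret_addr;
            return if (new_len == 0) 0 else error.OutOfMemory; // only frees
        }
    };

    pub const null_allocator = blk: {
        var tmp: u1 = 0;
        break :blk Allocator.init(&tmp, NullAllocator.alloc, NullAllocator.resize);
    };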
 
 /// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
@@ -165,20 +164,20 @@ var c_allocator_state = Allocator{
 /// This allocator is safe to use as the backing allocator with
 /// `ArenaAllocator` for example and is more optimal in such a case
 /// than `c_allocator`.
-pub const raw_c_allocator = &raw_c_allocator_state;
-var raw_c_allocator_state = Allocator{
-    .allocFn = rawCAlloc,
-    .resizeFn = rawCResize,
+pub const raw_c_allocator = blk: {
+    // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+    // allowing the use of `*void`, but it would still be ugly.
+    var tmp: u1 = 0;
+    break :blk Allocator.init(&tmp, rawCAlloc, rawCResize);
 };
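
Per the doc comment above, `raw_c_allocator` is the preferred backing allocator for an arena. With the refactor the interface is passed by value; a sketch (requires linking libc):

    var arena = std.heap.ArenaAllocator.init(std.heap.raw_c_allocator);
    defer arena.deinit();
    const allocator = arena.getAllocator();
    const buf = try allocator.alloc(u8, 1024);
    _ = buf; // released in bulk by arena.deinit()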
 
 fn rawCAlloc(
-    self: *Allocator,
+    _: *u1,
     len: usize,
     ptr_align: u29,
     len_align: u29,
     ret_addr: usize,
 ) Allocator.Error![]u8 {
-    _ = self;
     _ = len_align;
     _ = ret_addr;
     assert(ptr_align <= @alignOf(std.c.max_align_t));
@@ -187,14 +186,13 @@ fn rawCAlloc(
 }
 
 fn rawCResize(
-    self: *Allocator,
+    _: *u1,
     buf: []u8,
     old_align: u29,
     new_len: usize,
     len_align: u29,
     ret_addr: usize,
 ) Allocator.Error!usize {
-    _ = self;
     _ = old_align;
     _ = ret_addr;
     if (new_len == 0) {
@@ -210,19 +208,18 @@ fn rawCResize(
 /// This allocator makes a syscall directly for every allocation and free.
 /// Thread-safe and lock-free.
 pub const page_allocator = if (builtin.target.isWasm())
-    &wasm_page_allocator_state
-else if (builtin.target.os.tag == .freestanding)
+blk: {
+    // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+    // allowing the use of `*void`, but it would still be ugly.
+    var tmp: u1 = 0;
+    break :blk Allocator.init(&tmp, WasmPageAllocator.alloc, WasmPageAllocator.resize);
+} else if (builtin.target.os.tag == .freestanding)
     root.os.heap.page_allocator
-else
-    &page_allocator_state;
-
-var page_allocator_state = Allocator{
-    .allocFn = PageAllocator.alloc,
-    .resizeFn = PageAllocator.resize,
-};
-var wasm_page_allocator_state = Allocator{
-    .allocFn = WasmPageAllocator.alloc,
-    .resizeFn = WasmPageAllocator.resize,
+else blk: {
+    // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+    // allowing the use of `*void`, but it would still be ugly.
+    var tmp: u1 = 0;
+    break :blk Allocator.init(&tmp, PageAllocator.alloc, PageAllocator.resize);
 };
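
Note that `page_allocator` is now an `Allocator` value rather than a pointer to hidden state; allocation call sites look the same, but code that previously stored or passed `*Allocator` now uses the value directly:

    const slice = try std.heap.page_allocator.alloc(u8, 100);
    defer std.heap.page_allocator.free(slice);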
 
 /// Verifies that the adjusted length will still map to the full length
@@ -236,8 +233,7 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
 pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
 
 const PageAllocator = struct {
-    fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
-        _ = allocator;
+    fn alloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
         _ = ra;
         assert(n > 0);
         const aligned_len = mem.alignForward(n, mem.page_size);
@@ -335,14 +331,13 @@ const PageAllocator = struct {
     }
 
     fn resize(
-        allocator: *Allocator,
+        _: *u1,
         buf_unaligned: []u8,
         buf_align: u29,
         new_size: usize,
         len_align: u29,
         return_address: usize,
     ) Allocator.Error!usize {
-        _ = allocator;
         _ = buf_align;
         _ = return_address;
         const new_size_aligned = mem.alignForward(new_size, mem.page_size);
@@ -492,8 +487,7 @@ const WasmPageAllocator = struct {
         return mem.alignForward(memsize, mem.page_size) / mem.page_size;
     }
 
-    fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
-        _ = allocator;
+    fn alloc(_: *u1, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
         _ = ra;
         const page_count = nPages(len);
         const page_idx = try allocPages(page_count, alignment);
@@ -548,14 +542,13 @@ const WasmPageAllocator = struct {
     }
 
     fn resize(
-        allocator: *Allocator,
+        _: *u1,
         buf: []u8,
         buf_align: u29,
         new_len: usize,
         len_align: u29,
         return_address: usize,
     ) error{OutOfMemory}!usize {
-        _ = allocator;
         _ = buf_align;
         _ = return_address;
         const aligned_len = mem.alignForward(buf.len, mem.page_size);
@@ -572,21 +565,20 @@ const WasmPageAllocator = struct {
 
 pub const HeapAllocator = switch (builtin.os.tag) {
     .windows => struct {
-        allocator: Allocator,
         heap_handle: ?HeapHandle,
 
         const HeapHandle = os.windows.HANDLE;
 
         pub fn init() HeapAllocator {
             return HeapAllocator{
-                .allocator = Allocator{
-                    .allocFn = alloc,
-                    .resizeFn = resize,
-                },
                 .heap_handle = null,
             };
         }
 
+        pub fn getAllocator(self: *HeapAllocator) Allocator {
+            return Allocator.init(self, alloc, resize);
+        }
+
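
With the embedded `allocator` field gone, Windows callers fetch the interface explicitly (mirroring the "HeapAllocator" test later in this file):

    var heap = std.heap.HeapAllocator.init();
    defer heap.deinit();
    const allocator = heap.getAllocator();
    const p = try allocator.create(u32);
    defer allocator.destroy(p);
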
         pub fn deinit(self: *HeapAllocator) void {
             if (self.heap_handle) |heap_handle| {
                 os.windows.HeapDestroy(heap_handle);
@@ -598,14 +590,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         }
 
         fn alloc(
-            allocator: *Allocator,
+            self: *HeapAllocator,
             n: usize,
             ptr_align: u29,
             len_align: u29,
             return_address: usize,
         ) error{OutOfMemory}![]u8 {
             _ = return_address;
-            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
 
             const amt = n + ptr_align - 1 + @sizeOf(usize);
             const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
@@ -632,7 +623,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         }
 
         fn resize(
-            allocator: *Allocator,
+            self: *HeapAllocator,
             buf: []u8,
             buf_align: u29,
             new_size: usize,
@@ -641,7 +632,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
         ) error{OutOfMemory}!usize {
             _ = buf_align;
             _ = return_address;
-            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
             if (new_size == 0) {
                 os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
                 return 0;
@@ -682,21 +672,27 @@ fn sliceContainsSlice(container: []u8, slice: []u8) bool {
 }
 
 pub const FixedBufferAllocator = struct {
-    allocator: Allocator,
     end_index: usize,
     buffer: []u8,
 
     pub fn init(buffer: []u8) FixedBufferAllocator {
         return FixedBufferAllocator{
-            .allocator = Allocator{
-                .allocFn = alloc,
-                .resizeFn = resize,
-            },
             .buffer = buffer,
             .end_index = 0,
         };
     }
 
+    /// *WARNING* Using this at the same time as the interface returned by `getThreadSafeAllocator` is not thread-safe.
+    pub fn getAllocator(self: *FixedBufferAllocator) Allocator {
+        return Allocator.init(self, alloc, resize);
+    }
+
+    /// Provides a lock-free, thread-safe `Allocator` interface to the underlying `FixedBufferAllocator`.
+    /// *WARNING* Using this at the same time as the interface returned by `getAllocator` is not thread-safe.
+    pub fn getThreadSafeAllocator(self: *FixedBufferAllocator) Allocator {
+        return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize);
+    }
+
     pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
         return sliceContainsPtr(self.buffer, ptr);
     }
@@ -712,10 +708,9 @@ pub const FixedBufferAllocator = struct {
         return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
     }
 
-    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+    fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
         _ = len_align;
         _ = ra;
-        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
             return error.OutOfMemory;
         const adjusted_index = self.end_index + adjust_off;
@@ -730,7 +725,7 @@ pub const FixedBufferAllocator = struct {
     }
 
     fn resize(
-        allocator: *Allocator,
+        self: *FixedBufferAllocator,
         buf: []u8,
         buf_align: u29,
         new_size: usize,
@@ -739,7 +734,6 @@ pub const FixedBufferAllocator = struct {
     ) Allocator.Error!usize {
         _ = buf_align;
         _ = return_address;
-        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         assert(self.ownsSlice(buf)); // sanity check
 
         if (!self.isLastAllocation(buf)) {
@@ -762,65 +756,34 @@ pub const FixedBufferAllocator = struct {
         return new_size;
     }
 
+    fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+        _ = len_align;
+        _ = ra;
+        var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
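+        // Lock-free bump allocation: take a snapshot of end_index, compute the
+        // aligned slice that snapshot implies, then try to publish the new
+        // end_index with cmpxchgWeak; on contention it returns the fresh value
+        // and the loop retries.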
+        while (true) {
+            const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
+                return error.OutOfMemory;
+            const adjusted_index = end_index + adjust_off;
+            const new_end_index = adjusted_index + n;
+            if (new_end_index > self.buffer.len) {
+                return error.OutOfMemory;
+            }
+            end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
+        }
+    }
+
     pub fn reset(self: *FixedBufferAllocator) void {
         self.end_index = 0;
     }
 };
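
A sketch of how the two interfaces are meant to be split, assuming the `std.Thread.spawn(config, function, args)` API of this era: one `FixedBufferAllocator` shared by several threads through `getThreadSafeAllocator`:

    var buffer: [64 * 1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = fba.getThreadSafeAllocator();

    const Worker = struct {
        fn run(a: std.mem.Allocator) void {
            // Each thread claims from the same buffer; memory is reclaimed
            // via fba.reset(), not per-allocation free.
            _ = a.alloc(u8, 128) catch return;
        }
    };
    var threads: [4]std.Thread = undefined;
    for (threads) |*t| t.* = try std.Thread.spawn(.{}, Worker.run, .{allocator});
    for (threads) |t| t.join();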
 
-pub const ThreadSafeFixedBufferAllocator = blk: {
-    if (builtin.single_threaded) {
-        break :blk FixedBufferAllocator;
-    } else {
-        // lock free
-        break :blk struct {
-            allocator: Allocator,
-            end_index: usize,
-            buffer: []u8,
-
-            pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
-                return ThreadSafeFixedBufferAllocator{
-                    .allocator = Allocator{
-                        .allocFn = alloc,
-                        .resizeFn = Allocator.noResize,
-                    },
-                    .buffer = buffer,
-                    .end_index = 0,
-                };
-            }
-
-            fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
-                _ = len_align;
-                _ = ra;
-                const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
-                var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
-                while (true) {
-                    const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
-                        return error.OutOfMemory;
-                    const adjusted_index = end_index + adjust_off;
-                    const new_end_index = adjusted_index + n;
-                    if (new_end_index > self.buffer.len) {
-                        return error.OutOfMemory;
-                    }
-                    end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
-                }
-            }
+pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `getThreadSafeAllocator` on FixedBufferAllocator");
 
-            pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
-                self.end_index = 0;
-            }
-        };
-    }
-};
-
-pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
+pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
     return StackFallbackAllocator(size){
         .buffer = undefined,
         .fallback_allocator = fallback_allocator,
         .fixed_buffer_allocator = undefined,
-        .allocator = Allocator{
-            .allocFn = StackFallbackAllocator(size).alloc,
-            .resizeFn = StackFallbackAllocator(size).resize,
-        },
     };
 }
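
Typical usage, keeping in mind the `get` caveat documented below (it also resets the internal buffer, so fetch the interface once):

    var sfa = std.heap.stackFallback(256, std.heap.page_allocator);
    const allocator = sfa.get();
    // Allocations that fit come from the 256-byte stack buffer; anything
    // larger spills over to the fallback, here page_allocator.
    const small = try allocator.alloc(u8, 64);
    defer allocator.free(small);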
 
@@ -829,40 +792,38 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
         const Self = @This();
 
         buffer: [size]u8,
-        allocator: Allocator,
-        fallback_allocator: *Allocator,
+        fallback_allocator: Allocator,
         fixed_buffer_allocator: FixedBufferAllocator,
 
-        pub fn get(self: *Self) *Allocator {
+        /// WARNING: This function both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator.
+        pub fn get(self: *Self) Allocator {
             self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
-            return &self.allocator;
+            return Allocator.init(self, alloc, resize);
         }
 
         fn alloc(
-            allocator: *Allocator,
+            self: *Self,
             len: usize,
             ptr_align: u29,
             len_align: u29,
             return_address: usize,
         ) error{OutOfMemory}![]u8 {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
-            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, len, ptr_align, len_align, return_address) catch
-                return self.fallback_allocator.allocFn(self.fallback_allocator, len, ptr_align, len_align, return_address);
+            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
+                return self.fallback_allocator.allocFn(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address);
         }
 
         fn resize(
-            allocator: *Allocator,
+            self: *Self,
             buf: []u8,
             buf_align: u29,
             new_len: usize,
             len_align: u29,
             return_address: usize,
         ) error{OutOfMemory}!usize {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
             if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
-                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator.allocator, buf, buf_align, new_len, len_align, return_address);
+                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
             } else {
-                return self.fallback_allocator.resizeFn(self.fallback_allocator, buf, buf_align, new_len, len_align, return_address);
+                return self.fallback_allocator.resizeFn(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address);
             }
         }
     };
@@ -950,8 +911,8 @@ test "HeapAllocator" {
     if (builtin.os.tag == .windows) {
         var heap_allocator = HeapAllocator.init();
         defer heap_allocator.deinit();
+        const allocator = heap_allocator.getAllocator();
 
-        const allocator = &heap_allocator.allocator;
         try testAllocator(allocator);
         try testAllocatorAligned(allocator);
         try testAllocatorLargeAlignment(allocator);
@@ -962,36 +923,39 @@ test "HeapAllocator" {
 test "ArenaAllocator" {
     var arena_allocator = ArenaAllocator.init(page_allocator);
     defer arena_allocator.deinit();
+    const allocator = arena_allocator.getAllocator();
 
-    try testAllocator(&arena_allocator.allocator);
-    try testAllocatorAligned(&arena_allocator.allocator);
-    try testAllocatorLargeAlignment(&arena_allocator.allocator);
-    try testAllocatorAlignedShrink(&arena_allocator.allocator);
+    try testAllocator(allocator);
+    try testAllocatorAligned(allocator);
+    try testAllocatorLargeAlignment(allocator);
+    try testAllocatorAlignedShrink(allocator);
 }
 
 var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
 test "FixedBufferAllocator" {
     var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
+    const allocator = fixed_buffer_allocator.getAllocator();
 
-    try testAllocator(&fixed_buffer_allocator.allocator);
-    try testAllocatorAligned(&fixed_buffer_allocator.allocator);
-    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
-    try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+    try testAllocator(allocator);
+    try testAllocatorAligned(allocator);
+    try testAllocatorLargeAlignment(allocator);
+    try testAllocatorAlignedShrink(allocator);
 }
 
 test "FixedBufferAllocator.reset" {
     var buf: [8]u8 align(@alignOf(u64)) = undefined;
     var fba = FixedBufferAllocator.init(buf[0..]);
+    const allocator = fba.getAllocator();
 
     const X = 0xeeeeeeeeeeeeeeee;
     const Y = 0xffffffffffffffff;
 
-    var x = try fba.allocator.create(u64);
+    var x = try allocator.create(u64);
     x.* = X;
-    try testing.expectError(error.OutOfMemory, fba.allocator.create(u64));
+    try testing.expectError(error.OutOfMemory, allocator.create(u64));
 
     fba.reset();
-    var y = try fba.allocator.create(u64);
+    var y = try allocator.create(u64);
     y.* = Y;
 
     // we expect Y to have overwritten X.
@@ -1014,23 +978,25 @@ test "FixedBufferAllocator Reuse memory on realloc" {
     // check if we re-use the memory
     {
         var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+        const allocator = fixed_buffer_allocator.getAllocator();
 
-        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
+        var slice0 = try allocator.alloc(u8, 5);
         try testing.expect(slice0.len == 5);
-        var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
+        var slice1 = try allocator.realloc(slice0, 10);
         try testing.expect(slice1.ptr == slice0.ptr);
         try testing.expect(slice1.len == 10);
-        try testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
+        try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
     }
     // check that we don't re-use the memory if it's not the most recent block
     {
         var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+        const allocator = fixed_buffer_allocator.getAllocator();
 
-        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+        var slice0 = try allocator.alloc(u8, 2);
         slice0[0] = 1;
         slice0[1] = 2;
-        var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
-        var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
+        var slice1 = try allocator.alloc(u8, 2);
+        var slice2 = try allocator.realloc(slice0, 4);
         try testing.expect(slice0.ptr != slice2.ptr);
         try testing.expect(slice1.ptr != slice2.ptr);
         try testing.expect(slice2[0] == 1);
@@ -1038,19 +1004,19 @@ test "FixedBufferAllocator Reuse memory on realloc" {
     }
 }
 
-test "ThreadSafeFixedBufferAllocator" {
-    var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+test "Thread safe FixedBufferAllocator" {
+    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
 
-    try testAllocator(&fixed_buffer_allocator.allocator);
-    try testAllocatorAligned(&fixed_buffer_allocator.allocator);
-    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
-    try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+    try testAllocator(fixed_buffer_allocator.getThreadSafeAllocator());
+    try testAllocatorAligned(fixed_buffer_allocator.getThreadSafeAllocator());
+    try testAllocatorLargeAlignment(fixed_buffer_allocator.getThreadSafeAllocator());
+    try testAllocatorAlignedShrink(fixed_buffer_allocator.getThreadSafeAllocator());
 }
 
 /// This one should not try alignments that exceed what C malloc can handle.
-pub fn testAllocator(base_allocator: *mem.Allocator) !void {
+pub fn testAllocator(base_allocator: mem.Allocator) !void {
     var validationAllocator = mem.validationWrap(base_allocator);
-    const allocator = &validationAllocator.allocator;
+    const allocator = validationAllocator.getAllocator();
 
     var slice = try allocator.alloc(*i32, 100);
     try testing.expect(slice.len == 100);
@@ -1094,9 +1060,9 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
     allocator.free(oversize);
 }
 
-pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
     var validationAllocator = mem.validationWrap(base_allocator);
-    const allocator = &validationAllocator.allocator;
+    const allocator = validationAllocator.getAllocator();
 
     // Test a few alignment values, smaller and bigger than the type's one
     inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
@@ -1124,9 +1090,9 @@ pub fn testAllocatorAligned(base_allocator: *mem.Allocator) !void {
     }
 }
 
-pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
     var validationAllocator = mem.validationWrap(base_allocator);
-    const allocator = &validationAllocator.allocator;
+    const allocator = validationAllocator.getAllocator();
 
     // Maybe a platform's page_size is actually the same as or
     //  very near usize?
@@ -1156,12 +1122,12 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) !void {
     allocator.free(slice);
 }
 
-pub fn testAllocatorAlignedShrink(base_allocator: *mem.Allocator) !void {
+pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
     var validationAllocator = mem.validationWrap(base_allocator);
-    const allocator = &validationAllocator.allocator;
+    const allocator = validationAllocator.getAllocator();
 
     var debug_buffer: [1000]u8 = undefined;
-    const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
+    var fba = FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fba.getAllocator();
 
     const alloc_size = mem.page_size * 2 + 50;
     var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig
index d61f66ce4a50..65b08399456f 100644
--- a/lib/std/heap/arena_allocator.zig
+++ b/lib/std/heap/arena_allocator.zig
@@ -6,9 +6,7 @@ const Allocator = std.mem.Allocator;
 /// This allocator takes an existing allocator, wraps it, and provides an interface
 /// where you can allocate without freeing, and then free it all together.
 pub const ArenaAllocator = struct {
-    allocator: Allocator,
-
-    child_allocator: *Allocator,
+    child_allocator: Allocator,
     state: State,
 
     /// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
@@ -17,21 +15,21 @@ pub const ArenaAllocator = struct {
         buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}),
         end_index: usize = 0,
 
-        pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
+        pub fn promote(self: State, child_allocator: Allocator) ArenaAllocator {
             return .{
-                .allocator = Allocator{
-                    .allocFn = alloc,
-                    .resizeFn = resize,
-                },
                 .child_allocator = child_allocator,
                 .state = self,
             };
         }
     };
 
+    pub fn getAllocator(self: *ArenaAllocator) Allocator {
+        return Allocator.init(self, alloc, resize);
+    }
+
     const BufNode = std.SinglyLinkedList([]u8).Node;
 
-    pub fn init(child_allocator: *Allocator) ArenaAllocator {
+    pub fn init(child_allocator: Allocator) ArenaAllocator {
         return (State{}).promote(child_allocator);
     }
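
The `State`/`promote` split above lets long-lived structures store only the arena's state rather than a whole `ArenaAllocator`; a sketch of that pattern (the `Cache` type is hypothetical):

    const Cache = struct {
        child_allocator: std.mem.Allocator,
        arena_state: std.heap.ArenaAllocator.State = .{},

        fn addEntry(self: *@This()) ![]u8 {
            var arena = self.arena_state.promote(self.child_allocator);
            defer self.arena_state = arena.state; // persist the updated state
            return arena.getAllocator().alloc(u8, 16);
        }
    };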
 
@@ -49,7 +47,7 @@ pub const ArenaAllocator = struct {
         const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
         const big_enough_len = prev_len + actual_min_size;
         const len = big_enough_len + big_enough_len / 2;
-        const buf = try self.child_allocator.allocFn(self.child_allocator, len, @alignOf(BufNode), 1, @returnAddress());
+        const buf = try self.child_allocator.allocFn(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress());
         const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
         buf_node.* = BufNode{
             .data = buf,
@@ -60,10 +58,9 @@ pub const ArenaAllocator = struct {
         return buf_node;
     }
 
-    fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+    fn alloc(self: *ArenaAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
         _ = len_align;
         _ = ra;
-        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
 
         var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
         while (true) {
@@ -91,11 +88,10 @@ pub const ArenaAllocator = struct {
         }
     }
 
-    fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
+    fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
         _ = buf_align;
         _ = len_align;
         _ = ret_addr;
-        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
 
         const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;
         const cur_buf = cur_node.data[@sizeOf(BufNode)..];
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index c3c28a53b6c9..d4f1dde29979 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -172,11 +172,7 @@ pub const Config = struct {
 
 pub fn GeneralPurposeAllocator(comptime config: Config) type {
     return struct {
-        allocator: Allocator = Allocator{
-            .allocFn = alloc,
-            .resizeFn = resize,
-        },
-        backing_allocator: *Allocator = std.heap.page_allocator,
+        backing_allocator: Allocator = std.heap.page_allocator,
         buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
         large_allocations: LargeAllocTable = .{},
         empty_buckets: if (config.retain_metadata) ?*BucketHeader else void =
@@ -284,6 +280,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             }
         };
 
+        pub fn getAllocator(self: *Self) Allocator {
+            return Allocator.init(self, alloc, resize);
+        }
+
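
The usual setup then becomes (as exercised by the tests below):

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit(); // returns true if leaks were detected
    const allocator = gpa.getAllocator();
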
         fn bucketStackTrace(
             bucket: *BucketHeader,
             size_class: usize,
@@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                     var it = self.large_allocations.iterator();
                     while (it.next()) |large| {
                         if (large.value_ptr.freed) {
-                            _ = self.backing_allocator.resizeFn(self.backing_allocator, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
+                            _ = self.backing_allocator.resizeFn(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable;
                         }
                     }
                 }
@@ -571,7 +571,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const result_len = if (config.never_unmap and new_size == 0)
                 0
             else
-                try self.backing_allocator.resizeFn(self.backing_allocator, old_mem, old_align, new_size, len_align, ret_addr);
+                try self.backing_allocator.resizeFn(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr);
 
             if (config.enable_memory_limit) {
                 entry.value_ptr.requested_size = new_size;
@@ -606,15 +606,13 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         }
 
         fn resize(
-            allocator: *Allocator,
+            self: *Self,
             old_mem: []u8,
             old_align: u29,
             new_size: usize,
             len_align: u29,
             ret_addr: usize,
         ) Error!usize {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
-
             self.mutex.lock();
             defer self.mutex.unlock();
 
@@ -755,9 +753,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             return true;
         }
 
-        fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
-
+        fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
             self.mutex.lock();
             defer self.mutex.unlock();
 
@@ -768,7 +764,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             const new_aligned_size = math.max(len, ptr_align);
             if (new_aligned_size > largest_bucket_object_size) {
                 try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
-                const slice = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr);
+                const slice = try self.backing_allocator.allocFn(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr);
 
                 const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
                 if (config.retain_metadata and !config.never_unmap) {
@@ -834,7 +830,7 @@ const test_config = Config{};
 test "small allocations - free in same order" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var list = std.ArrayList(*u64).init(std.testing.allocator);
     defer list.deinit();
@@ -853,7 +849,7 @@ test "small allocations - free in same order" {
 test "small allocations - free in reverse order" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var list = std.ArrayList(*u64).init(std.testing.allocator);
     defer list.deinit();
@@ -872,7 +868,7 @@ test "small allocations - free in reverse order" {
 test "large allocations" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     const ptr1 = try allocator.alloc(u64, 42768);
     const ptr2 = try allocator.alloc(u64, 52768);
@@ -885,7 +881,7 @@ test "large allocations" {
 test "realloc" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
     defer allocator.free(slice);
@@ -907,7 +903,7 @@ test "realloc" {
 test "shrink" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var slice = try allocator.alloc(u8, 20);
     defer allocator.free(slice);
@@ -930,7 +926,7 @@ test "shrink" {
 test "large object - grow" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
     defer allocator.free(slice1);
@@ -948,7 +944,7 @@ test "large object - grow" {
 test "realloc small object to large object" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var slice = try allocator.alloc(u8, 70);
     defer allocator.free(slice);
@@ -965,7 +961,7 @@ test "realloc small object to large object" {
 test "shrink large object to large object" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var slice = try allocator.alloc(u8, page_size * 2 + 50);
     defer allocator.free(slice);
@@ -988,10 +984,10 @@ test "shrink large object to large object" {
 test "shrink large object to large object with larger alignment" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var debug_buffer: [1000]u8 = undefined;
-    const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fba.getAllocator();
 
     const alloc_size = page_size * 2 + 50;
     var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
@@ -1023,7 +1019,7 @@ test "shrink large object to large object with larger alignment" {
 test "realloc large object to small object" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var slice = try allocator.alloc(u8, page_size * 2 + 50);
     defer allocator.free(slice);
@@ -1041,7 +1037,7 @@ test "overrideable mutexes" {
         .mutex = std.Thread.Mutex{},
     };
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     const ptr = try allocator.create(i32);
     defer allocator.destroy(ptr);
@@ -1050,7 +1046,7 @@ test "overrideable mutexes" {
 test "non-page-allocator backing allocator" {
     var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     const ptr = try allocator.create(i32);
     defer allocator.destroy(ptr);
@@ -1059,10 +1055,10 @@ test "non-page-allocator backing allocator" {
 test "realloc large object to larger alignment" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var debug_buffer: [1000]u8 = undefined;
-    const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
+    var fba = std.heap.FixedBufferAllocator.init(&debug_buffer);
+    const debug_allocator = fba.getAllocator();
 
     var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
     defer allocator.free(slice);
@@ -1098,9 +1094,9 @@ test "realloc large object to larger alignment" {
 
 test "large object shrinks to small but allocation fails during shrink" {
     var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
-    var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
+    var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.getAllocator() };
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     var slice = try allocator.alloc(u8, page_size * 2 + 50);
     defer allocator.free(slice);
@@ -1117,7 +1113,7 @@ test "large object shrinks to small but allocation fails during shrink" {
 test "objects of size 1024 and 2048" {
     var gpa = GeneralPurposeAllocator(test_config){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     const slice = try allocator.alloc(u8, 1025);
     const slice2 = try allocator.alloc(u8, 3000);
@@ -1129,7 +1125,7 @@ test "objects of size 1024 and 2048" {
 test "setting a memory cap" {
     var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     gpa.setRequestedMemoryLimit(1010);
 
@@ -1158,9 +1154,9 @@ test "double frees" {
     defer std.testing.expect(!backing_gpa.deinit()) catch @panic("leak");
 
     const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true });
-    var gpa = GPA{ .backing_allocator = &backing_gpa.allocator };
+    var gpa = GPA{ .backing_allocator = backing_gpa.getAllocator() };
     defer std.testing.expect(!gpa.deinit()) catch @panic("leak");
-    const allocator = &gpa.allocator;
+    const allocator = gpa.getAllocator();
 
     // detect a small allocation double free, even though bucket is emptied
     const index: usize = 6;
diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig
index cf9c4162a742..1f3146f79fb3 100644
--- a/lib/std/heap/log_to_writer_allocator.zig
+++ b/lib/std/heap/log_to_writer_allocator.zig
@@ -5,33 +5,31 @@ const Allocator = std.mem.Allocator;
 /// on every call to the allocator. Writer errors are ignored.
 pub fn LogToWriterAllocator(comptime Writer: type) type {
     return struct {
-        allocator: Allocator,
-        parent_allocator: *Allocator,
+        parent_allocator: Allocator,
         writer: Writer,
 
         const Self = @This();
 
-        pub fn init(parent_allocator: *Allocator, writer: Writer) Self {
+        pub fn init(parent_allocator: Allocator, writer: Writer) Self {
             return Self{
-                .allocator = Allocator{
-                    .allocFn = alloc,
-                    .resizeFn = resize,
-                },
                 .parent_allocator = parent_allocator,
                 .writer = writer,
             };
         }
 
+        pub fn getAllocator(self: *Self) Allocator {
+            return Allocator.init(self, alloc, resize);
+        }
+
         fn alloc(
-            allocator: *Allocator,
+            self: *Self,
             len: usize,
             ptr_align: u29,
             len_align: u29,
             ra: usize,
         ) error{OutOfMemory}![]u8 {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
             self.writer.print("alloc : {}", .{len}) catch {};
-            const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
+            const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
             if (result) |_| {
                 self.writer.print(" success!\n", .{}) catch {};
             } else |_| {
@@ -41,14 +39,13 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
         }
 
         fn resize(
-            allocator: *Allocator,
+            self: *Self,
             buf: []u8,
             buf_align: u29,
             new_len: usize,
             len_align: u29,
             ra: usize,
         ) error{OutOfMemory}!usize {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
             if (new_len == 0) {
                 self.writer.print("free  : {}\n", .{buf.len}) catch {};
             } else if (new_len <= buf.len) {
@@ -56,7 +53,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             } else {
                 self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
             }
-            if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+            if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
                 if (new_len > buf.len) {
                     self.writer.print(" success!\n", .{}) catch {};
                 }
@@ -73,7 +70,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
 /// This allocator is used in front of another allocator and logs to the provided writer
 /// on every call to the allocator. Writer errors are ignored.
 pub fn logToWriterAllocator(
-    parent_allocator: *Allocator,
+    parent_allocator: Allocator,
     writer: anytype,
 ) LogToWriterAllocator(@TypeOf(writer)) {
     return LogToWriterAllocator(@TypeOf(writer)).init(parent_allocator, writer);
@@ -85,7 +82,7 @@ test "LogToWriterAllocator" {
 
     var allocator_buf: [10]u8 = undefined;
     var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf));
-    const allocator = &logToWriterAllocator(&fixedBufferAllocator.allocator, fbs.writer()).allocator;
+    var logging_allocator = logToWriterAllocator(fixedBufferAllocator.getAllocator(), fbs.writer());
+    const allocator = logging_allocator.getAllocator();
 
     var a = try allocator.alloc(u8, 10);
     a = allocator.shrink(a, 5);
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 0c6224b7ce15..34dc554dee5f 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -22,21 +22,20 @@ pub fn ScopedLoggingAllocator(
     const log = std.log.scoped(scope);
 
     return struct {
-        allocator: Allocator,
-        parent_allocator: *Allocator,
+        parent_allocator: Allocator,
 
         const Self = @This();
 
-        pub fn init(parent_allocator: *Allocator) Self {
+        pub fn init(parent_allocator: Allocator) Self {
             return .{
-                .allocator = Allocator{
-                    .allocFn = alloc,
-                    .resizeFn = resize,
-                },
                 .parent_allocator = parent_allocator,
             };
         }
 
+        pub fn getAllocator(self: *Self) Allocator {
+            return Allocator.init(self, alloc, resize);
+        }
+
         // This function is required as the `std.log.log` function is not public
         inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void {
             switch (log_level) {
@@ -48,13 +47,12 @@ pub fn ScopedLoggingAllocator(
         }
 
         fn alloc(
-            allocator: *Allocator,
+            self: *Self,
             len: usize,
             ptr_align: u29,
             len_align: u29,
             ra: usize,
         ) error{OutOfMemory}![]u8 {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
-            const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
+            const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra);
             if (result) |_| {
                 logHelper(
@@ -73,15 +71,13 @@ pub fn ScopedLoggingAllocator(
         }
 
         fn resize(
-            allocator: *Allocator,
+            self: *Self,
             buf: []u8,
             buf_align: u29,
             new_len: usize,
             len_align: u29,
             ra: usize,
         ) error{OutOfMemory}!usize {
-            const self = @fieldParentPtr(Self, "allocator", allocator);
-
-            if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| {
+            if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| {
                 if (new_len == 0) {
                     logHelper(success_log_level, "free - success - len: {}", .{buf.len});
@@ -116,6 +112,6 @@ pub fn ScopedLoggingAllocator(
 /// This allocator is used in front of another allocator and logs to `std.log`
 /// on every call to the allocator.
 /// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
-pub fn loggingAllocator(parent_allocator: *Allocator) LoggingAllocator(.debug, .err) {
+pub fn loggingAllocator(parent_allocator: Allocator) LoggingAllocator(.debug, .err) {
     return LoggingAllocator(.debug, .err).init(parent_allocator);
 }
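
Wiring the wrapper up after the refactor looks like, for example:

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var logging = std.heap.loggingAllocator(gpa.getAllocator());
    const allocator = logging.getAllocator();
    const data = try allocator.alloc(u8, 32); // success logged at .debug, failure at .err
    defer allocator.free(data);
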
diff --git a/lib/std/io/buffered_atomic_file.zig b/lib/std/io/buffered_atomic_file.zig
index 5b27ba78f1a4..71edabb20a5d 100644
--- a/lib/std/io/buffered_atomic_file.zig
+++ b/lib/std/io/buffered_atomic_file.zig
@@ -7,7 +7,7 @@ pub const BufferedAtomicFile = struct {
     atomic_file: fs.AtomicFile,
     file_writer: File.Writer,
     buffered_writer: BufferedWriter,
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
 
     pub const buffer_size = 4096;
     pub const BufferedWriter = std.io.BufferedWriter(buffer_size, File.Writer);
@@ -16,7 +16,7 @@ pub const BufferedAtomicFile = struct {
     /// TODO when https://github.com/ziglang/zig/issues/2761 is solved
     /// this API will not need an allocator
     pub fn create(
-        allocator: *mem.Allocator,
+        allocator: mem.Allocator,
         dir: fs.Dir,
         dest_path: []const u8,
         atomic_file_options: fs.Dir.AtomicFileOptions,
diff --git a/lib/std/io/peek_stream.zig b/lib/std/io/peek_stream.zig
index c77052f97591..8779e22250eb 100644
--- a/lib/std/io/peek_stream.zig
+++ b/lib/std/io/peek_stream.zig
@@ -38,7 +38,7 @@ pub fn PeekStream(
                 }
             },
             .Dynamic => struct {
-                pub fn init(base: ReaderType, allocator: *mem.Allocator) Self {
+                pub fn init(base: ReaderType, allocator: mem.Allocator) Self {
                     return .{
                         .unbuffered_reader = base,
                         .fifo = FifoType.init(allocator),
diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig
index 3da053e4fb0b..28395526685f 100644
--- a/lib/std/io/reader.zig
+++ b/lib/std/io/reader.zig
@@ -88,7 +88,7 @@ pub fn Reader(
         /// memory would be greater than `max_size`, returns `error.StreamTooLong`.
         /// Caller owns returned memory.
         /// If this function returns an error, the contents from the stream read so far are lost.
-        pub fn readAllAlloc(self: Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
+        pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) ![]u8 {
             var array_list = std.ArrayList(u8).init(allocator);
             defer array_list.deinit();
             try self.readAllArrayList(&array_list, max_size);
@@ -127,7 +127,7 @@ pub fn Reader(
         /// If this function returns an error, the contents from the stream read so far are lost.
         pub fn readUntilDelimiterAlloc(
             self: Self,
-            allocator: *mem.Allocator,
+            allocator: mem.Allocator,
             delimiter: u8,
             max_size: usize,
         ) ![]u8 {
@@ -163,7 +163,7 @@ pub fn Reader(
         /// If this function returns an error, the contents from the stream read so far are lost.
         pub fn readUntilDelimiterOrEofAlloc(
             self: Self,
-            allocator: *mem.Allocator,
+            allocator: mem.Allocator,
             delimiter: u8,
             max_size: usize,
         ) !?[]u8 {
diff --git a/lib/std/json.zig b/lib/std/json.zig
index ff37bc416264..978213a5961c 100644
--- a/lib/std/json.zig
+++ b/lib/std/json.zig
@@ -1476,7 +1476,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
 }
 
 pub const ParseOptions = struct {
-    allocator: ?*Allocator = null,
+    allocator: ?Allocator = null,
 
     /// Behaviour when a duplicate field is encountered.
     duplicate_field_behavior: enum {
@@ -2033,7 +2033,7 @@ test "parse into tagged union" {
 
     { // failing allocations should be bubbled up instantly without trying next member
         var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0);
-        const options = ParseOptions{ .allocator = &fail_alloc.allocator };
+        const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
         const T = union(enum) {
             // both fields here match the input
             string: []const u8,
@@ -2081,7 +2081,7 @@ test "parse union bubbles up AllocatorRequired" {
 
 test "parseFree descends into tagged union" {
     var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1);
-    const options = ParseOptions{ .allocator = &fail_alloc.allocator };
+    const options = ParseOptions{ .allocator = fail_alloc.getAllocator() };
     const T = union(enum) {
         int: i32,
         float: f64,
@@ -2328,7 +2328,7 @@ test "parse into double recursive union definition" {
 
 /// A non-stream JSON parser which constructs a tree of Value's.
 pub const Parser = struct {
-    allocator: *Allocator,
+    allocator: Allocator,
     state: State,
     copy_strings: bool,
     // Stores parent nodes and un-combined Values.
@@ -2341,7 +2341,7 @@ pub const Parser = struct {
         Simple,
     };
 
-    pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
+    pub fn init(allocator: Allocator, copy_strings: bool) Parser {
         return Parser{
             .allocator = allocator,
             .state = .Simple,
@@ -2364,9 +2364,10 @@ pub const Parser = struct {
 
         var arena = ArenaAllocator.init(p.allocator);
         errdefer arena.deinit();
+        const allocator = arena.getAllocator();
 
         while (try s.next()) |token| {
-            try p.transition(&arena.allocator, input, s.i - 1, token);
+            try p.transition(allocator, input, s.i - 1, token);
         }
 
         debug.assert(p.stack.items.len == 1);
@@ -2379,7 +2380,7 @@ pub const Parser = struct {
 
     // Even though p.allocator exists, we take an explicit allocator so that allocation state
     // can be cleaned up on error correctly during a call to `parse`.
-    fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void {
+    fn transition(p: *Parser, allocator: Allocator, input: []const u8, i: usize, token: Token) !void {
         switch (p.state) {
             .ObjectKey => switch (token) {
                 .ObjectEnd => {
@@ -2536,7 +2537,7 @@ pub const Parser = struct {
         }
     }
 
-    fn parseString(p: *Parser, allocator: *Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
+    fn parseString(p: *Parser, allocator: Allocator, s: std.meta.TagPayload(Token, Token.String), input: []const u8, i: usize) !Value {
         const slice = s.slice(input, i);
         switch (s.escapes) {
             .None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
@@ -2737,7 +2738,7 @@ test "write json then parse it" {
     try testing.expect(mem.eql(u8, tree.root.Object.get("str").?.String, "hello"));
 }
 
-fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
+fn testParse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value {
     var p = Parser.init(arena_allocator, false);
     return (try p.parse(json_str)).root;
 }
@@ -2745,13 +2746,13 @@ fn testParse(arena_allocator: *std.mem.Allocator, json_str: []const u8) !Value {
 test "parsing empty string gives appropriate error" {
     var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
     defer arena_allocator.deinit();
-    try testing.expectError(error.UnexpectedEndOfJson, testParse(&arena_allocator.allocator, ""));
+    try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.getAllocator(), ""));
 }
 
 test "integer after float has proper type" {
     var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
     defer arena_allocator.deinit();
-    const json = try testParse(&arena_allocator.allocator,
+    const json = try testParse(arena_allocator.getAllocator(),
         \\{
         \\  "float": 3.14,
         \\  "ints": [1, 2, 3]
@@ -2786,7 +2787,7 @@ test "escaped characters" {
         \\}
     ;
 
-    const obj = (try testParse(&arena_allocator.allocator, input)).Object;
+    const obj = (try testParse(arena_allocator.getAllocator(), input)).Object;
 
     try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\");
     try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/");
@@ -2812,11 +2813,12 @@ test "string copy option" {
 
     var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
     defer arena_allocator.deinit();
+    const allocator = arena_allocator.getAllocator();
 
-    const tree_nocopy = try Parser.init(&arena_allocator.allocator, false).parse(input);
+    const tree_nocopy = try Parser.init(allocator, false).parse(input);
     const obj_nocopy = tree_nocopy.root.Object;
 
-    const tree_copy = try Parser.init(&arena_allocator.allocator, true).parse(input);
+    const tree_copy = try Parser.init(allocator, true).parse(input);
     const obj_copy = tree_copy.root.Object;
 
     for ([_][]const u8{ "noescape", "simple", "unicode", "surrogatepair" }) |field_name| {
diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig
index 61da6ec49be0..2ef6fa3a86c2 100644
--- a/lib/std/json/write_stream.zig
+++ b/lib/std/json/write_stream.zig
@@ -243,7 +243,7 @@ test "json write stream" {
     try w.beginObject();
 
     try w.objectField("object");
-    try w.emitJson(try getJsonObject(&arena_allocator.allocator));
+    try w.emitJson(try getJsonObject(arena_allocator.getAllocator()));
 
     try w.objectField("string");
     try w.emitString("This is a string");
@@ -286,7 +286,7 @@ test "json write stream" {
     try std.testing.expect(std.mem.eql(u8, expected, result));
 }
 
-fn getJsonObject(allocator: *std.mem.Allocator) !std.json.Value {
+fn getJsonObject(allocator: std.mem.Allocator) !std.json.Value {
     var value = std.json.Value{ .Object = std.json.ObjectMap.init(allocator) };
     try value.Object.put("one", std.json.Value{ .Integer = @intCast(i64, 1) });
     try value.Object.put("two", std.json.Value{ .Float = 2.0 });
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index a8ad58be02ca..d7bcf9badcd5 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -142,7 +142,7 @@ pub const Mutable = struct {
 
     /// Asserts that the allocator owns the limbs memory. If this is not the case,
     /// use `toConst().toManaged()`.
-    pub fn toManaged(self: Mutable, allocator: *Allocator) Managed {
+    pub fn toManaged(self: Mutable, allocator: Allocator) Managed {
         return .{
             .allocator = allocator,
             .limbs = self.limbs,
@@ -283,7 +283,7 @@ pub const Mutable = struct {
         base: u8,
         value: []const u8,
         limbs_buffer: []Limb,
-        allocator: ?*Allocator,
+        allocator: ?Allocator,
     ) error{InvalidCharacter}!void {
         assert(base >= 2 and base <= 16);
 
@@ -608,7 +608,7 @@ pub const Mutable = struct {
     /// rma is given by `a.limbs.len + b.limbs.len`.
     ///
     /// `limbs_buffer` is used for temporary storage. The amount required is given by `calcMulLimbsBufferLen`.
-    pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void {
+    pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?Allocator) void {
         var buf_index: usize = 0;
 
         const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: {
@@ -638,7 +638,7 @@ pub const Mutable = struct {
     ///
     /// If `allocator` is provided, it will be used for temporary storage to improve
     /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
-    pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?*Allocator) void {
+    pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?Allocator) void {
         assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
         assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
 
@@ -674,7 +674,7 @@ pub const Mutable = struct {
         signedness: Signedness,
         bit_count: usize,
         limbs_buffer: []Limb,
-        allocator: ?*Allocator,
+        allocator: ?Allocator,
     ) void {
         var buf_index: usize = 0;
         const req_limbs = calcTwosCompLimbCount(bit_count);
@@ -714,7 +714,7 @@ pub const Mutable = struct {
         b: Const,
         signedness: Signedness,
         bit_count: usize,
-        allocator: ?*Allocator,
+        allocator: ?Allocator,
     ) void {
         assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
         assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing
@@ -763,7 +763,7 @@ pub const Mutable = struct {
     ///
     /// If `allocator` is provided, it will be used for temporary storage to improve
     /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
-    pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
+    pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?Allocator) void {
         _ = opt_allocator;
         assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
 
@@ -1660,7 +1660,7 @@ pub const Const = struct {
     positive: bool,
 
     /// The result is an independent resource which is managed by the caller.
-    pub fn toManaged(self: Const, allocator: *Allocator) Allocator.Error!Managed {
+    pub fn toManaged(self: Const, allocator: Allocator) Allocator.Error!Managed {
         const limbs = try allocator.alloc(Limb, math.max(Managed.default_capacity, self.limbs.len));
         mem.copy(Limb, limbs, self.limbs);
         return Managed{
@@ -1873,7 +1873,7 @@ pub const Const = struct {
     /// Caller owns returned memory.
     /// Asserts that `base` is in the range [2, 16].
     /// See also `toString`, a lower level function than this.
-    pub fn toStringAlloc(self: Const, allocator: *Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
+    pub fn toStringAlloc(self: Const, allocator: Allocator, base: u8, case: std.fmt.Case) Allocator.Error![]u8 {
         assert(base >= 2);
         assert(base <= 16);
 
@@ -2092,7 +2092,7 @@ pub const Managed = struct {
     pub const default_capacity = 4;
 
     /// Allocator used by the Managed when requesting memory.
-    allocator: *Allocator,
+    allocator: Allocator,
 
     /// Raw digits. These are:
     ///
@@ -2109,7 +2109,7 @@ pub const Managed = struct {
 
     /// Creates a new `Managed`. `default_capacity` limbs will be allocated immediately.
     /// The integer value after initializing is `0`.
-    pub fn init(allocator: *Allocator) !Managed {
+    pub fn init(allocator: Allocator) !Managed {
         return initCapacity(allocator, default_capacity);
     }
 
@@ -2131,7 +2131,7 @@ pub const Managed = struct {
     /// Creates a new `Managed` with value `value`.
     ///
     /// This is identical to an `init`, followed by a `set`.
-    pub fn initSet(allocator: *Allocator, value: anytype) !Managed {
+    pub fn initSet(allocator: Allocator, value: anytype) !Managed {
         var s = try Managed.init(allocator);
         try s.set(value);
         return s;
@@ -2140,7 +2140,7 @@ pub const Managed = struct {
     /// Creates a new Managed with a specific capacity. If capacity < default_capacity then the
     /// default capacity will be used instead.
     /// The integer value after initializing is `0`.
-    pub fn initCapacity(allocator: *Allocator, capacity: usize) !Managed {
+    pub fn initCapacity(allocator: Allocator, capacity: usize) !Managed {
         return Managed{
             .allocator = allocator,
             .metadata = 1,
@@ -2206,7 +2206,7 @@ pub const Managed = struct {
         return other.cloneWithDifferentAllocator(other.allocator);
     }
 
-    pub fn cloneWithDifferentAllocator(other: Managed, allocator: *Allocator) !Managed {
+    pub fn cloneWithDifferentAllocator(other: Managed, allocator: Allocator) !Managed {
         return Managed{
             .allocator = allocator,
             .metadata = other.metadata,
@@ -2347,7 +2347,7 @@ pub const Managed = struct {
 
     /// Converts self to a string in the requested base. Memory is allocated from the provided
     /// allocator and not the one present in self.
-    pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 {
+    pub fn toString(self: Managed, allocator: Allocator, base: u8, case: std.fmt.Case) ![]u8 {
         _ = allocator;
         if (base < 2 or base > 16) return error.InvalidBase;
        return self.toConst().toStringAlloc(allocator, base, case);
@@ -2784,7 +2784,7 @@ const AccOp = enum {
 /// r MUST NOT alias any of a or b.
 ///
 /// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
-fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
+fn llmulacc(comptime op: AccOp, opt_allocator: ?Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
     @setRuntimeSafety(debug_safety);
     assert(r.len >= a.len);
     assert(r.len >= b.len);
@@ -2819,7 +2819,7 @@ fn llmulacc(comptime op: AccOp, opt_allocator: ?*Allocator, r: []Limb, a: []cons
 /// The result is computed modulo `r.len`. When `r.len >= a.len + b.len`, no overflow occurs.
 fn llmulaccKaratsuba(
     comptime op: AccOp,
-    allocator: *Allocator,
+    allocator: Allocator,
     r: []Limb,
     a: []const Limb,
     b: []const Limb,
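Note the optional parameters above: scratch storage becomes `?Allocator` rather than `?*Allocator`, which is cheap now that the interface is a small value. A usage sketch for the `Managed` API under the new signatures (1234 renders as `4d2` in base 16):

```zig
const std = @import("std");

test "big.int.Managed with a value allocator (sketch)" {
    const gpa = std.testing.allocator;

    var a = try std.math.big.int.Managed.initSet(gpa, 1234);
    defer a.deinit();

    // toStringAlloc takes the allocator by value, per the hunk above.
    const s = try a.toConst().toStringAlloc(gpa, 16, .lower);
    defer gpa.free(s);

    try std.testing.expectEqualStrings("4d2", s);
}
```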
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index 1f66417496ca..de6804ca017e 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -29,7 +29,7 @@ pub const Rational = struct {
 
     /// Create a new Rational. A small amount of memory will be allocated on initialization.
     /// This will be 2 * Int.default_capacity.
-    pub fn init(a: *Allocator) !Rational {
+    pub fn init(a: Allocator) !Rational {
         return Rational{
             .p = try Int.init(a),
             .q = try Int.initSet(a, 1),
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 0390733b3d2d..b5dc50191178 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -37,24 +37,26 @@ pub const Allocator = @import("mem/Allocator.zig");
 pub fn ValidationAllocator(comptime T: type) type {
     return struct {
         const Self = @This();
-        allocator: Allocator,
+
         underlying_allocator: T,
+
         pub fn init(allocator: T) @This() {
             return .{
-                .allocator = .{
-                    .allocFn = alloc,
-                    .resizeFn = resize,
-                },
                 .underlying_allocator = allocator,
             };
         }
-        fn getUnderlyingAllocatorPtr(self: *@This()) *Allocator {
-            if (T == *Allocator) return self.underlying_allocator;
-            if (*T == *Allocator) return &self.underlying_allocator;
-            return &self.underlying_allocator.allocator;
+
+        pub fn getAllocator(self: *Self) Allocator {
+            return Allocator.init(self, alloc, resize);
         }
+
+        fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
+            if (T == Allocator) return self.underlying_allocator;
+            return self.underlying_allocator.getAllocator();
+        }
+
         pub fn alloc(
-            allocator: *Allocator,
+            self: *Self,
             n: usize,
             ptr_align: u29,
             len_align: u29,
@@ -67,9 +69,8 @@ pub fn ValidationAllocator(comptime T: type) type {
                 assert(n >= len_align);
             }
 
-            const self = @fieldParentPtr(@This(), "allocator", allocator);
             const underlying = self.getUnderlyingAllocatorPtr();
-            const result = try underlying.allocFn(underlying, n, ptr_align, len_align, ret_addr);
+            const result = try underlying.allocFn(underlying.ptr, n, ptr_align, len_align, ret_addr);
             assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
             if (len_align == 0) {
                 assert(result.len == n);
@@ -79,8 +80,9 @@ pub fn ValidationAllocator(comptime T: type) type {
             }
             return result;
         }
+
         pub fn resize(
-            allocator: *Allocator,
+            self: *Self,
             buf: []u8,
             buf_align: u29,
             new_len: usize,
@@ -92,9 +94,8 @@ pub fn ValidationAllocator(comptime T: type) type {
                 assert(mem.isAlignedAnyAlign(new_len, len_align));
                 assert(new_len >= len_align);
             }
-            const self = @fieldParentPtr(@This(), "allocator", allocator);
             const underlying = self.getUnderlyingAllocatorPtr();
-            const result = try underlying.resizeFn(underlying, buf, buf_align, new_len, len_align, ret_addr);
+            const result = try underlying.resizeFn(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr);
             if (len_align == 0) {
                 assert(result == new_len);
             } else {
@@ -103,7 +104,7 @@ pub fn ValidationAllocator(comptime T: type) type {
             }
             return result;
         }
-        pub usingnamespace if (T == *Allocator or !@hasDecl(T, "reset")) struct {} else struct {
+        pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct {
             pub fn reset(self: *Self) void {
                 self.underlying_allocator.reset();
             }
@@ -130,12 +131,14 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
     return adjusted;
 }
 
-var failAllocator = Allocator{
-    .allocFn = failAllocatorAlloc,
-    .resizeFn = Allocator.noResize,
+const failAllocator = blk: {
+    // TODO: This is an ugly hack; it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented,
+    // allowing the use of `*void`, though it would still be ugly.
+    var tmp: u1 = 0;
+    break :blk Allocator.init(&tmp, failAllocatorAlloc, Allocator.NoResize(u1).noResize);
 };
-fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
-    _ = self;
+
+fn failAllocatorAlloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
     _ = n;
     _ = alignment;
     _ = len_align;
@@ -1786,18 +1789,18 @@ pub fn SplitIterator(comptime T: type) type {
 
 /// Naively combines a series of slices with a separator.
 /// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
+pub fn join(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
     return joinMaybeZ(allocator, separator, slices, false);
 }
 
 /// Naively combines a series of slices with a separator and null terminator.
 /// Allocates memory for the result, which must be freed by the caller.
-pub fn joinZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
+pub fn joinZ(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![:0]u8 {
     const out = try joinMaybeZ(allocator, separator, slices, true);
     return out[0 .. out.len - 1 :0];
 }
 
-fn joinMaybeZ(allocator: *Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
+fn joinMaybeZ(allocator: Allocator, separator: []const u8, slices: []const []const u8, zero: bool) ![]u8 {
     if (slices.len == 0) return if (zero) try allocator.dupe(u8, &[1]u8{0}) else &[0]u8{};
 
     const total_len = blk: {
@@ -1876,7 +1879,7 @@ test "mem.joinZ" {
 }
 
 /// Copies each T from slices into a new slice that exactly holds all the elements.
-pub fn concat(allocator: *Allocator, comptime T: type, slices: []const []const T) ![]T {
+pub fn concat(allocator: Allocator, comptime T: type, slices: []const []const T) ![]T {
     if (slices.len == 0) return &[0]T{};
 
     const total_len = blk: {
@@ -2318,7 +2321,7 @@ test "replacementSize" {
 }
 
 /// Perform a replacement on an allocated buffer of pre-determined size. Caller must free returned memory.
-pub fn replaceOwned(comptime T: type, allocator: *Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
+pub fn replaceOwned(comptime T: type, allocator: Allocator, input: []const T, needle: []const T, replacement: []const T) Allocator.Error![]T {
     var output = try allocator.alloc(T, replacementSize(T, input, needle, replacement));
     _ = replace(T, input, needle, replacement, output);
     return output;
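The `failAllocator` block above exposes the one awkward corner of the new design: a stateless allocator still needs some single-item, non-`void` pointer to erase. Below is a sketch of the same workaround using a global dummy instead of a comptime temporary; `null_allocator`, `dummy_state`, and `nullAlloc` are hypothetical names, while `Allocator.init` and `Allocator.NoResize` are the helpers introduced in the `mem/Allocator.zig` hunks that follow:

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

// Dummy state: the interface requires a single-item pointer even when the
// implementation has nothing to point at.
var dummy_state: u1 = 0;

fn nullAlloc(_: *u1, n: usize, ptr_align: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
    _ = n;
    _ = ptr_align;
    _ = len_align;
    _ = ra;
    return error.OutOfMemory;
}

// An always-failing allocator value, analogous to `failAllocator` above.
pub const null_allocator = Allocator.init(&dummy_state, nullAlloc, Allocator.NoResize(u1).noResize);
```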
diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig
index a3c0995496f7..8478120b003a 100644
--- a/lib/std/mem/Allocator.zig
+++ b/lib/std/mem/Allocator.zig
@@ -8,6 +8,9 @@ const Allocator = @This();
 
 pub const Error = error{OutOfMemory};
 
+// The type-erased pointer to the allocator implementation.
+ptr: *c_void,
+
 /// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
 ///
 /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
@@ -17,7 +20,7 @@ pub const Error = error{OutOfMemory};
 ///
 /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
 /// If the value is `0` it means no return address has been provided.
-allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
+allocFn: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
 
 /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
 /// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value
@@ -39,24 +42,56 @@ allocFn: fn (self: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_a
 ///
 /// `ret_addr` is optionally provided as the first return address of the allocation call stack.
 /// If the value is `0` it means no return address has been provided.
-resizeFn: fn (self: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+resizeFn: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+
+pub fn init(
+    pointer: anytype,
+    comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
+    comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize,
+) Allocator {
+    const Ptr = @TypeOf(pointer);
+    assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
+    assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
+    const gen = struct {
+        fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
+            const alignment = @typeInfo(Ptr).Pointer.alignment;
+            const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+            return allocFn(self, len, ptr_align, len_align, ret_addr);
+        }
+        fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize {
+            const alignment = @typeInfo(Ptr).Pointer.alignment;
+            const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
+            return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr);
+        }
+    };
 
-/// Set to resizeFn if in-place resize is not supported.
-pub fn noResize(
-    self: *Allocator,
-    buf: []u8,
-    buf_align: u29,
-    new_len: usize,
-    len_align: u29,
-    ret_addr: usize,
-) Error!usize {
-    _ = self;
-    _ = buf_align;
-    _ = len_align;
-    _ = ret_addr;
-    if (new_len > buf.len)
-        return error.OutOfMemory;
-    return new_len;
+    return .{
+        .ptr = pointer,
+        .allocFn = gen.alloc,
+        .resizeFn = gen.resize,
+    };
+}
+
+/// Set resizeFn to `NoResize(AllocatorType).noResize` if in-place resize is not supported.
+pub fn NoResize(comptime AllocatorType: type) type {
+    return struct {
+        pub fn noResize(
+            self: *AllocatorType,
+            buf: []u8,
+            buf_align: u29,
+            new_len: usize,
+            len_align: u29,
+            ret_addr: usize,
+        ) Error!usize {
+            _ = self;
+            _ = buf_align;
+            _ = len_align;
+            _ = ret_addr;
+            if (new_len > buf.len)
+                return error.OutOfMemory;
+            return new_len;
+        }
+    };
 }
 
 /// Realloc is used to modify the size or alignment of an existing allocation,
@@ -80,8 +115,8 @@ pub fn noResize(
 /// as `old_mem` was when `reallocFn` is called. The bytes of
 /// `return_value[old_mem.len..]` have undefined values.
 /// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
-pub fn reallocBytes(
-    self: *Allocator,
+fn reallocBytes(
+    self: Allocator,
     /// Guaranteed to be the same as what was returned from most recent call to
     /// `allocFn` or `resizeFn`.
     /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
@@ -106,7 +141,7 @@ pub fn reallocBytes(
     return_address: usize,
 ) Error![]u8 {
     if (old_mem.len == 0) {
-        const new_mem = try self.allocFn(self, new_byte_count, new_alignment, len_align, return_address);
+        const new_mem = try self.allocFn(self.ptr, new_byte_count, new_alignment, len_align, return_address);
         // TODO: https://github.com/ziglang/zig/issues/4298
         @memset(new_mem.ptr, undefined, new_byte_count);
         return new_mem;
@@ -117,7 +152,7 @@ pub fn reallocBytes(
             const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
             return old_mem.ptr[0..shrunk_len];
         }
-        if (self.resizeFn(self, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
+        if (self.resizeFn(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
             assert(resized_len >= new_byte_count);
             // TODO: https://github.com/ziglang/zig/issues/4298
             @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
@@ -133,7 +168,7 @@ pub fn reallocBytes(
/// Move the given memory to a new location in the given allocator to accommodate a new
 /// size and alignment.
 fn moveBytes(
-    self: *Allocator,
+    self: Allocator,
     old_mem: []u8,
     old_align: u29,
     new_len: usize,
@@ -143,7 +178,7 @@ fn moveBytes(
 ) Error![]u8 {
     assert(old_mem.len > 0);
     assert(new_len > 0);
-    const new_mem = try self.allocFn(self, new_len, new_alignment, len_align, return_address);
+    const new_mem = try self.allocFn(self.ptr, new_len, new_alignment, len_align, return_address);
     @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len));
     // TODO https://github.com/ziglang/zig/issues/4298
     @memset(old_mem.ptr, undefined, old_mem.len);
@@ -153,7 +188,7 @@ fn moveBytes(
 
 /// Returns a pointer to undefined memory.
 /// Call `destroy` with the result to free the memory.
-pub fn create(self: *Allocator, comptime T: type) Error!*T {
+pub fn create(self: Allocator, comptime T: type) Error!*T {
     if (@sizeOf(T) == 0) return @as(*T, undefined);
     const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
     return &slice[0];
@@ -161,7 +196,7 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
 
 /// `ptr` should be the return value of `create`, or otherwise
 /// have the same address and alignment property.
-pub fn destroy(self: *Allocator, ptr: anytype) void {
+pub fn destroy(self: Allocator, ptr: anytype) void {
     const info = @typeInfo(@TypeOf(ptr)).Pointer;
     const T = info.child;
     if (@sizeOf(T) == 0) return;
@@ -177,12 +212,12 @@ pub fn destroy(self: *Allocator, ptr: anytype) void {
 /// call `free` when done.
 ///
 /// For allocating a single item, see `create`.
-pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
+pub fn alloc(self: Allocator, comptime T: type, n: usize) Error![]T {
     return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
 }
 
 pub fn allocWithOptions(
-    self: *Allocator,
+    self: Allocator,
     comptime Elem: type,
     n: usize,
     /// null means naturally aligned
@@ -193,7 +228,7 @@ pub fn allocWithOptions(
 }
 
 pub fn allocWithOptionsRetAddr(
-    self: *Allocator,
+    self: Allocator,
     comptime Elem: type,
     n: usize,
     /// null means naturally aligned
@@ -227,7 +262,7 @@ fn AllocWithOptionsPayload(comptime Elem: type, comptime alignment: ?u29, compti
 ///
 /// For allocating a single item, see `create`.
 pub fn allocSentinel(
-    self: *Allocator,
+    self: Allocator,
     comptime Elem: type,
     n: usize,
     comptime sentinel: Elem,
@@ -236,7 +271,7 @@ pub fn allocSentinel(
 }
 
 pub fn alignedAlloc(
-    self: *Allocator,
+    self: Allocator,
     comptime T: type,
     /// null means naturally aligned
     comptime alignment: ?u29,
@@ -246,7 +281,7 @@ pub fn alignedAlloc(
 }
 
 pub fn allocAdvanced(
-    self: *Allocator,
+    self: Allocator,
     comptime T: type,
     /// null means naturally aligned
     comptime alignment: ?u29,
@@ -259,7 +294,7 @@ pub fn allocAdvanced(
 pub const Exact = enum { exact, at_least };
 
 pub fn allocAdvancedWithRetAddr(
-    self: *Allocator,
+    self: Allocator,
     comptime T: type,
     /// null means naturally aligned
     comptime alignment: ?u29,
@@ -285,7 +320,7 @@ pub fn allocAdvancedWithRetAddr(
         .exact => 0,
         .at_least => size_of_T,
     };
-    const byte_slice = try self.allocFn(self, byte_count, a, len_align, return_address);
+    const byte_slice = try self.allocFn(self.ptr, byte_count, a, len_align, return_address);
     switch (exact) {
         .exact => assert(byte_slice.len == byte_count),
         .at_least => assert(byte_slice.len >= byte_count),
@@ -301,7 +336,7 @@ pub fn allocAdvancedWithRetAddr(
 }
 
 /// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
-pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
+pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) {
     const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
     const T = Slice.child;
     if (new_n == 0) {
@@ -310,7 +345,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
     }
     const old_byte_slice = mem.sliceAsBytes(old_mem);
     const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
-    const rc = try self.resizeFn(self, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
+    const rc = try self.resizeFn(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress());
     assert(rc == new_byte_count);
     const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
     return mem.bytesAsSlice(T, new_byte_slice);
@@ -326,7 +361,7 @@ pub fn resize(self: *Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(ol
 /// in `std.ArrayList.shrink`.
 /// If you need guaranteed success, call `shrink`.
 /// If `new_n` is 0, this is the same as `free` and it always succeeds.
-pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
     const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
     break :t Error![]align(Slice.alignment) Slice.child;
 } {
@@ -334,7 +369,7 @@ pub fn realloc(self: *Allocator, old_mem: anytype, new_n: usize) t: {
     return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
 }
 
-pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn reallocAtLeast(self: Allocator, old_mem: anytype, new_n: usize) t: {
     const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
     break :t Error![]align(Slice.alignment) Slice.child;
 } {
@@ -346,7 +381,7 @@ pub fn reallocAtLeast(self: *Allocator, old_mem: anytype, new_n: usize) t: {
 /// a new alignment, which can be larger, smaller, or the same as the old
 /// allocation.
 pub fn reallocAdvanced(
-    self: *Allocator,
+    self: Allocator,
     old_mem: anytype,
     comptime new_alignment: u29,
     new_n: usize,
@@ -356,7 +391,7 @@ pub fn reallocAdvanced(
 }
 
 pub fn reallocAdvancedWithRetAddr(
-    self: *Allocator,
+    self: Allocator,
     old_mem: anytype,
     comptime new_alignment: u29,
     new_n: usize,
@@ -389,7 +424,7 @@ pub fn reallocAdvancedWithRetAddr(
 /// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
 /// Returned slice has same alignment as old_mem.
 /// Shrinking to 0 is the same as calling `free`.
-pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
+pub fn shrink(self: Allocator, old_mem: anytype, new_n: usize) t: {
     const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
     break :t []align(Slice.alignment) Slice.child;
 } {
@@ -401,7 +436,7 @@ pub fn shrink(self: *Allocator, old_mem: anytype, new_n: usize) t: {
 /// a new alignment, which must be smaller or the same as the old
 /// allocation.
 pub fn alignedShrink(
-    self: *Allocator,
+    self: Allocator,
     old_mem: anytype,
     comptime new_alignment: u29,
     new_n: usize,
@@ -413,7 +448,7 @@ pub fn alignedShrink(
 /// the return address of the first stack frame, which may be relevant for
 /// allocators which collect stack traces.
 pub fn alignedShrinkWithRetAddr(
-    self: *Allocator,
+    self: Allocator,
     old_mem: anytype,
     comptime new_alignment: u29,
     new_n: usize,
@@ -440,7 +475,7 @@ pub fn alignedShrinkWithRetAddr(
 
 /// Free an array allocated with `alloc`. To free a single item,
 /// see `destroy`.
-pub fn free(self: *Allocator, memory: anytype) void {
+pub fn free(self: Allocator, memory: anytype) void {
     const Slice = @typeInfo(@TypeOf(memory)).Pointer;
     const bytes = mem.sliceAsBytes(memory);
     const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
@@ -452,14 +487,14 @@ pub fn free(self: *Allocator, memory: anytype) void {
 }
 
 /// Copies `m` to newly allocated memory. Caller owns the memory.
-pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
+pub fn dupe(allocator: Allocator, comptime T: type, m: []const T) ![]T {
     const new_buf = try allocator.alloc(T, m.len);
     mem.copy(T, new_buf, m);
     return new_buf;
 }
 
 /// Copies `m` to newly allocated memory, with a null-terminated element. Caller owns the memory.
-pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
+pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
     const new_buf = try allocator.alloc(T, m.len + 1);
     mem.copy(T, new_buf, m);
     new_buf[m.len] = 0;
@@ -471,7 +506,7 @@ pub fn dupeZ(allocator: *Allocator, comptime T: type, m: []const T) ![:0]T {
 /// This function allows a runtime `buf_align` value. Callers should generally prefer
 /// to call `shrink` directly.
 pub fn shrinkBytes(
-    self: *Allocator,
+    self: Allocator,
     buf: []u8,
     buf_align: u29,
     new_len: usize,
@@ -479,5 +514,5 @@ pub fn shrinkBytes(
     return_address: usize,
 ) usize {
     assert(new_len <= buf.len);
-    return self.resizeFn(self, buf, buf_align, new_len, len_align, return_address) catch unreachable;
+    return self.resizeFn(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable;
 }
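This file is the heart of the refactor: `Allocator` becomes a plain value holding `ptr` plus two function pointers, and `init` generates the `*c_void` thunks from statically typed methods. Below is a minimal custom implementation under the new interface; `CountingAllocator` is a hypothetical example, but it mirrors the `FailingAllocator` hunks further down, forwarding through `allocFn`/`resizeFn` with the erased `ptr`:

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

const CountingAllocator = struct {
    backing: Allocator,
    count: usize = 0,

    pub fn getAllocator(self: *CountingAllocator) Allocator {
        // init() wraps the typed methods below in type-erasing thunks.
        return Allocator.init(self, alloc, resize);
    }

    fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 {
        self.count += 1;
        return self.backing.allocFn(self.backing.ptr, len, ptr_align, len_align, ret_addr);
    }

    fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
        return self.backing.resizeFn(self.backing.ptr, buf, buf_align, new_len, len_align, ret_addr);
    }
};

test "counting allocator (sketch)" {
    var counting = CountingAllocator{ .backing = std.testing.allocator };
    const gpa = counting.getAllocator();

    const buf = try gpa.alloc(u8, 16);
    defer gpa.free(buf);
    try std.testing.expectEqual(@as(usize, 1), counting.count);
}
```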
diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig
index 2e36eacd7fef..a651076aba43 100644
--- a/lib/std/multi_array_list.zig
+++ b/lib/std/multi_array_list.zig
@@ -59,7 +59,7 @@ pub fn MultiArrayList(comptime S: type) type {
                 };
             }
 
-            pub fn deinit(self: *Slice, gpa: *Allocator) void {
+            pub fn deinit(self: *Slice, gpa: Allocator) void {
                 var other = self.toMultiArrayList();
                 other.deinit(gpa);
                 self.* = undefined;
@@ -106,7 +106,7 @@ pub fn MultiArrayList(comptime S: type) type {
         };
 
         /// Release all allocated memory.
-        pub fn deinit(self: *Self, gpa: *Allocator) void {
+        pub fn deinit(self: *Self, gpa: Allocator) void {
             gpa.free(self.allocatedBytes());
             self.* = undefined;
         }
@@ -161,7 +161,7 @@ pub fn MultiArrayList(comptime S: type) type {
         }
 
         /// Extend the list by 1 element. Allocates more memory as necessary.
-        pub fn append(self: *Self, gpa: *Allocator, elem: S) !void {
+        pub fn append(self: *Self, gpa: Allocator, elem: S) !void {
             try self.ensureUnusedCapacity(gpa, 1);
             self.appendAssumeCapacity(elem);
         }
@@ -188,7 +188,7 @@ pub fn MultiArrayList(comptime S: type) type {
         /// after and including the specified index back by one and
         /// sets the given index to the specified element.  May reallocate
         /// and invalidate iterators.
-        pub fn insert(self: *Self, gpa: *Allocator, index: usize, elem: S) void {
+        pub fn insert(self: *Self, gpa: Allocator, index: usize, elem: S) !void {
             try self.ensureUnusedCapacity(gpa, 1);
             self.insertAssumeCapacity(index, elem);
         }
@@ -242,7 +242,7 @@ pub fn MultiArrayList(comptime S: type) type {
 
         /// Adjust the list's length to `new_len`.
         /// Does not initialize added items, if any.
-        pub fn resize(self: *Self, gpa: *Allocator, new_len: usize) !void {
+        pub fn resize(self: *Self, gpa: Allocator, new_len: usize) !void {
             try self.ensureTotalCapacity(gpa, new_len);
             self.len = new_len;
         }
@@ -250,7 +250,7 @@ pub fn MultiArrayList(comptime S: type) type {
         /// Attempt to reduce allocated capacity to `new_len`.
         /// If `new_len` is greater than zero, this may fail to reduce the capacity,
         /// but the data remains intact and the length is updated to new_len.
-        pub fn shrinkAndFree(self: *Self, gpa: *Allocator, new_len: usize) void {
+        pub fn shrinkAndFree(self: *Self, gpa: Allocator, new_len: usize) void {
             if (new_len == 0) {
                 gpa.free(self.allocatedBytes());
                 self.* = .{};
@@ -314,7 +314,7 @@ pub fn MultiArrayList(comptime S: type) type {
         /// Modify the array so that it can hold at least `new_capacity` items.
         /// Implements super-linear growth to achieve amortized O(1) append operations.
         /// Invalidates pointers if additional memory is needed.
-        pub fn ensureTotalCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
+        pub fn ensureTotalCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
             var better_capacity = self.capacity;
             if (better_capacity >= new_capacity) return;
 
@@ -328,14 +328,14 @@ pub fn MultiArrayList(comptime S: type) type {
 
         /// Modify the array so that it can hold at least `additional_count` **more** items.
         /// Invalidates pointers if additional memory is needed.
-        pub fn ensureUnusedCapacity(self: *Self, gpa: *Allocator, additional_count: usize) !void {
+        pub fn ensureUnusedCapacity(self: *Self, gpa: Allocator, additional_count: usize) !void {
             return self.ensureTotalCapacity(gpa, self.len + additional_count);
         }
 
         /// Modify the array so that it can hold exactly `new_capacity` items.
         /// Invalidates pointers if additional memory is needed.
         /// `new_capacity` must be greater or equal to `len`.
-        pub fn setCapacity(self: *Self, gpa: *Allocator, new_capacity: usize) !void {
+        pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
             assert(new_capacity >= self.len);
             const new_bytes = try gpa.allocAdvanced(
                 u8,
@@ -372,7 +372,7 @@ pub fn MultiArrayList(comptime S: type) type {
 
         /// Create a copy of this list with a new backing store,
         /// using the specified allocator.
-        pub fn clone(self: Self, gpa: *Allocator) !Self {
+        pub fn clone(self: Self, gpa: Allocator) !Self {
             var result = Self{};
             errdefer result.deinit(gpa);
             try result.ensureTotalCapacity(gpa, self.len);
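Unmanaged containers such as `MultiArrayList` take `gpa` per call rather than storing it, so for them the refactor is purely a parameter-type change. A short usage sketch under the new signatures:

```zig
const std = @import("std");

test "MultiArrayList with a value allocator (sketch)" {
    const Pair = struct { a: u32, b: u8 };
    const gpa = std.testing.allocator;

    var list = std.MultiArrayList(Pair){};
    defer list.deinit(gpa);

    try list.append(gpa, .{ .a = 1, .b = 2 });
    try std.testing.expectEqual(@as(u32, 1), list.items(.a)[0]);
}
```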
diff --git a/lib/std/net.zig b/lib/std/net.zig
index 759adaa75673..4f5ce84034b0 100644
--- a/lib/std/net.zig
+++ b/lib/std/net.zig
@@ -664,7 +664,7 @@ pub const AddressList = struct {
 };
 
 /// All memory allocated with `allocator` will be freed before this function returns.
-pub fn tcpConnectToHost(allocator: *mem.Allocator, name: []const u8, port: u16) !Stream {
+pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) !Stream {
     const list = try getAddressList(allocator, name, port);
     defer list.deinit();
 
@@ -699,12 +699,12 @@ pub fn tcpConnectToAddress(address: Address) !Stream {
 }
 
 /// Call `AddressList.deinit` on the result.
-pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*AddressList {
+pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*AddressList {
     const result = blk: {
         var arena = std.heap.ArenaAllocator.init(allocator);
         errdefer arena.deinit();
 
-        const result = try arena.allocator.create(AddressList);
+        const result = try arena.getAllocator().create(AddressList);
         result.* = AddressList{
             .arena = arena,
             .addrs = undefined,
@@ -712,7 +712,7 @@ pub fn getAddressList(allocator: *mem.Allocator, name: []const u8, port: u16) !*
         };
         break :blk result;
     };
-    const arena = &result.arena.allocator;
+    const arena = result.arena.getAllocator();
     errdefer result.arena.deinit();
 
     if (builtin.target.os.tag == .windows or builtin.link_libc) {
@@ -1303,7 +1303,7 @@ const ResolvConf = struct {
 
 /// Ignores lines longer than 512 bytes.
 /// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
-fn getResolvConf(allocator: *mem.Allocator, rc: *ResolvConf) !void {
+fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
     rc.* = ResolvConf{
         .ns = std.ArrayList(LookupAddr).init(allocator),
         .search = std.ArrayList(u8).init(allocator),
diff --git a/lib/std/net/test.zig b/lib/std/net/test.zig
index 1742fb294713..f181bb49eaab 100644
--- a/lib/std/net/test.zig
+++ b/lib/std/net/test.zig
@@ -230,7 +230,7 @@ test "listen on ipv4 try connect on ipv6 then ipv4" {
     try await client_frame;
 }
 
-fn testClientToHost(allocator: *mem.Allocator, name: []const u8, port: u16) anyerror!void {
+fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
     if (builtin.os.tag == .wasi) return error.SkipZigTest;
 
     const connection = try net.tcpConnectToHost(allocator, name, port);
diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig
index d3c8d13bd123..fb5105706c4f 100644
--- a/lib/std/os/test.zig
+++ b/lib/std/os/test.zig
@@ -58,10 +58,11 @@ test "open smoke test" {
     // Get base abs path
     var arena = ArenaAllocator.init(testing.allocator);
     defer arena.deinit();
+    const allocator = arena.getAllocator();
 
     const base_path = blk: {
-        const relative_path = try fs.path.join(&arena.allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
-        break :blk try fs.realpathAlloc(&arena.allocator, relative_path);
+        const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] });
+        break :blk try fs.realpathAlloc(allocator, relative_path);
     };
 
     var file_path: []u8 = undefined;
@@ -69,34 +70,34 @@ test "open smoke test" {
     const mode: os.mode_t = if (native_os == .windows) 0 else 0o666;
 
     // Create some file using `open`.
-    file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+    file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
     fd = try os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode);
     os.close(fd);
 
     // Try this again with the same flags. This op should fail with error.PathAlreadyExists.
-    file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+    file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
     try expectError(error.PathAlreadyExists, os.open(file_path, os.O.RDWR | os.O.CREAT | os.O.EXCL, mode));
 
     // Try opening without `O.EXCL` flag.
-    file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+    file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
     fd = try os.open(file_path, os.O.RDWR | os.O.CREAT, mode);
     os.close(fd);
 
     // Try opening as a directory which should fail.
-    file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_file" });
+    file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_file" });
     try expectError(error.NotDir, os.open(file_path, os.O.RDWR | os.O.DIRECTORY, mode));
 
     // Create some directory
-    file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+    file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
     try os.mkdir(file_path, mode);
 
     // Open dir using `open`
-    file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+    file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
     fd = try os.open(file_path, os.O.RDONLY | os.O.DIRECTORY, mode);
     os.close(fd);
 
     // Try opening as file which should fail.
-    file_path = try fs.path.join(&arena.allocator, &[_][]const u8{ base_path, "some_dir" });
+    file_path = try fs.path.join(allocator, &[_][]const u8{ base_path, "some_dir" });
     try expectError(error.IsDir, os.open(file_path, os.O.RDWR, mode));
 }
 
diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig
index 26688d028e04..0a484fed314f 100644
--- a/lib/std/pdb.zig
+++ b/lib/std/pdb.zig
@@ -460,7 +460,7 @@ pub const PDBStringTableHeader = packed struct {
     ByteSize: u32,
 };
 
-fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
+fn readSparseBitVector(stream: anytype, allocator: mem.Allocator) ![]u32 {
     const num_words = try stream.readIntLittle(u32);
     var list = ArrayList(u32).init(allocator);
     errdefer list.deinit();
@@ -481,7 +481,7 @@ fn readSparseBitVector(stream: anytype, allocator: *mem.Allocator) ![]u32 {
 pub const Pdb = struct {
     in_file: File,
     msf: Msf,
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     string_table: ?*MsfStream,
     dbi: ?*MsfStream,
     modules: []Module,
@@ -500,7 +500,7 @@ pub const Pdb = struct {
         checksum_offset: ?usize,
     };
 
-    pub fn init(allocator: *mem.Allocator, path: []const u8) !Pdb {
+    pub fn init(allocator: mem.Allocator, path: []const u8) !Pdb {
         const file = try fs.cwd().openFile(path, .{ .intended_io_mode = .blocking });
         errdefer file.close();
 
@@ -858,7 +858,7 @@ const Msf = struct {
     directory: MsfStream,
     streams: []MsfStream,
 
-    fn init(allocator: *mem.Allocator, file: File) !Msf {
+    fn init(allocator: mem.Allocator, file: File) !Msf {
         const in = file.reader();
 
         const superblock = try in.readStruct(SuperBlock);
diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig
index be81abd96ccc..289ad9480ff1 100644
--- a/lib/std/priority_dequeue.zig
+++ b/lib/std/priority_dequeue.zig
@@ -21,10 +21,10 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
 
         items: []T,
         len: usize,
-        allocator: *Allocator,
+        allocator: Allocator,
 
         /// Initialize and return a new priority dequeue.
-        pub fn init(allocator: *Allocator) Self {
+        pub fn init(allocator: Allocator) Self {
             return Self{
                 .items = &[_]T{},
                 .len = 0,
@@ -336,7 +336,7 @@ pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) ty
         /// Dequeue takes ownership of the passed in slice. The slice must have been
         /// allocated with `allocator`.
         /// De-initialize with `deinit`.
-        pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
+        pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
             var queue = Self{
                 .items = items,
                 .len = items.len,
@@ -945,7 +945,7 @@ fn fuzzTestMinMax(rng: std.rand.Random, queue_size: usize) !void {
     }
 }
 
-fn generateRandomSlice(allocator: *std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
+fn generateRandomSlice(allocator: std.mem.Allocator, rng: std.rand.Random, size: usize) ![]u32 {
     var array = std.ArrayList(u32).init(allocator);
     try array.ensureTotalCapacity(size);
 
diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig
index 6d4b6634a465..1ae958f4e4cf 100644
--- a/lib/std/priority_queue.zig
+++ b/lib/std/priority_queue.zig
@@ -20,10 +20,10 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
 
         items: []T,
         len: usize,
-        allocator: *Allocator,
+        allocator: Allocator,
 
         /// Initialize and return a priority queue.
-        pub fn init(allocator: *Allocator) Self {
+        pub fn init(allocator: Allocator) Self {
             return Self{
                 .items = &[_]T{},
                 .len = 0,
@@ -153,7 +153,7 @@ pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order
         /// PriorityQueue takes ownership of the passed in slice. The slice must have been
         /// allocated with `allocator`.
         /// Deinitialize with `deinit`.
-        pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
+        pub fn fromOwnedSlice(allocator: Allocator, items: []T) Self {
             var queue = Self{
                 .items = items,
                 .len = items.len,
diff --git a/lib/std/process.zig b/lib/std/process.zig
index 0e7b5b25ec31..6b45a7e7aa34 100644
--- a/lib/std/process.zig
+++ b/lib/std/process.zig
@@ -21,7 +21,7 @@ pub fn getCwd(out_buffer: []u8) ![]u8 {
 }
 
 /// Caller must free the returned memory.
-pub fn getCwdAlloc(allocator: *Allocator) ![]u8 {
+pub fn getCwdAlloc(allocator: Allocator) ![]u8 {
     // The use of MAX_PATH_BYTES here is just a heuristic: most paths will fit
     // in stack_buf, avoiding an extra allocation in the common case.
     var stack_buf: [fs.MAX_PATH_BYTES]u8 = undefined;
@@ -54,7 +54,7 @@ test "getCwdAlloc" {
 }
 
 /// Caller owns resulting `BufMap`.
-pub fn getEnvMap(allocator: *Allocator) !BufMap {
+pub fn getEnvMap(allocator: Allocator) !BufMap {
     var result = BufMap.init(allocator);
     errdefer result.deinit();
 
@@ -154,7 +154,7 @@ pub const GetEnvVarOwnedError = error{
 };
 
 /// Caller must free returned memory.
-pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
+pub fn getEnvVarOwned(allocator: mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
     if (builtin.os.tag == .windows) {
         const result_w = blk: {
             const key_w = try std.unicode.utf8ToUtf16LeWithNull(allocator, key);
@@ -183,10 +183,10 @@ pub fn hasEnvVarConstant(comptime key: []const u8) bool {
     }
 }
 
-pub fn hasEnvVar(allocator: *Allocator, key: []const u8) error{OutOfMemory}!bool {
+pub fn hasEnvVar(allocator: Allocator, key: []const u8) error{OutOfMemory}!bool {
     if (builtin.os.tag == .windows) {
         var stack_alloc = std.heap.stackFallback(256 * @sizeOf(u16), allocator);
-        const key_w = try std.unicode.utf8ToUtf16LeWithNull(&stack_alloc.allocator, key);
+        const key_w = try std.unicode.utf8ToUtf16LeWithNull(stack_alloc.get(), key);
         defer stack_alloc.allocator.free(key_w);
         return std.os.getenvW(key_w) != null;
     } else {
@@ -227,7 +227,7 @@ pub const ArgIteratorPosix = struct {
 };
 
 pub const ArgIteratorWasi = struct {
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     index: usize,
     args: [][:0]u8,
 
@@ -235,7 +235,7 @@ pub const ArgIteratorWasi = struct {
 
     /// You must call deinit to free the internal buffer of the
     /// iterator after you are done.
-    pub fn init(allocator: *mem.Allocator) InitError!ArgIteratorWasi {
+    pub fn init(allocator: mem.Allocator) InitError!ArgIteratorWasi {
         const fetched_args = try ArgIteratorWasi.internalInit(allocator);
         return ArgIteratorWasi{
             .allocator = allocator,
@@ -244,7 +244,7 @@ pub const ArgIteratorWasi = struct {
         };
     }
 
-    fn internalInit(allocator: *mem.Allocator) InitError![][:0]u8 {
+    fn internalInit(allocator: mem.Allocator) InitError![][:0]u8 {
         const w = os.wasi;
         var count: usize = undefined;
         var buf_size: usize = undefined;
@@ -325,7 +325,7 @@ pub const ArgIteratorWindows = struct {
     }
 
     /// You must free the returned memory when done.
-    pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![:0]u8) {
+    pub fn next(self: *ArgIteratorWindows, allocator: Allocator) ?(NextError![:0]u8) {
         // march forward over whitespace
         while (true) : (self.index += 1) {
             const character = self.getPointAtIndex();
@@ -379,7 +379,7 @@ pub const ArgIteratorWindows = struct {
         }
     }
 
-    fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![:0]u8 {
+    fn internalNext(self: *ArgIteratorWindows, allocator: Allocator) NextError![:0]u8 {
         var buf = std.ArrayList(u16).init(allocator);
         defer buf.deinit();
 
@@ -423,7 +423,7 @@ pub const ArgIteratorWindows = struct {
         }
     }
 
-    fn convertFromWindowsCmdLineToUTF8(allocator: *Allocator, buf: []u16) NextError![:0]u8 {
+    fn convertFromWindowsCmdLineToUTF8(allocator: Allocator, buf: []u16) NextError![:0]u8 {
         return std.unicode.utf16leToUtf8AllocZ(allocator, buf) catch |err| switch (err) {
             error.ExpectedSecondSurrogateHalf,
             error.DanglingSurrogateHalf,
@@ -463,7 +463,7 @@ pub const ArgIterator = struct {
     pub const InitError = ArgIteratorWasi.InitError;
 
     /// You must deinitialize iterator's internal buffers by calling `deinit` when done.
-    pub fn initWithAllocator(allocator: *mem.Allocator) InitError!ArgIterator {
+    pub fn initWithAllocator(allocator: mem.Allocator) InitError!ArgIterator {
         if (builtin.os.tag == .wasi and !builtin.link_libc) {
             return ArgIterator{ .inner = try InnerType.init(allocator) };
         }
@@ -474,7 +474,7 @@ pub const ArgIterator = struct {
     pub const NextError = ArgIteratorWindows.NextError;
 
     /// You must free the returned memory when done.
-    pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![:0]u8) {
+    pub fn next(self: *ArgIterator, allocator: Allocator) ?(NextError![:0]u8) {
         if (builtin.os.tag == .windows) {
             return self.inner.next(allocator);
         } else {
@@ -513,7 +513,7 @@ pub fn args() ArgIterator {
 }
 
 /// You must deinitialize iterator's internal buffers by calling `deinit` when done.
-pub fn argsWithAllocator(allocator: *mem.Allocator) ArgIterator.InitError!ArgIterator {
+pub fn argsWithAllocator(allocator: mem.Allocator) ArgIterator.InitError!ArgIterator {
     return ArgIterator.initWithAllocator(allocator);
 }
 
@@ -539,7 +539,7 @@ test "args iterator" {
 }
 
 /// Caller must call argsFree on result.
-pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
+pub fn argsAlloc(allocator: mem.Allocator) ![][:0]u8 {
     // TODO refactor to only make 1 allocation.
     var it = if (builtin.os.tag == .wasi) try argsWithAllocator(allocator) else args();
     defer it.deinit();
@@ -579,7 +579,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![][:0]u8 {
     return result_slice_list;
 }
 
-pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const [:0]u8) void {
+pub fn argsFree(allocator: mem.Allocator, args_alloc: []const [:0]u8) void {
     var total_bytes: usize = 0;
     for (args_alloc) |arg| {
         total_bytes += @sizeOf([]u8) + arg.len + 1;
@@ -741,7 +741,7 @@ pub fn getBaseAddress() usize {
 /// requirement from `std.zig.system.NativeTargetInfo.detect`. Most likely this will require
 /// introducing a new, lower-level function which takes a callback function, and then this
 /// function which takes an allocator can exist on top of it.
-pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]u8 {
+pub fn getSelfExeSharedLibPaths(allocator: Allocator) error{OutOfMemory}![][:0]u8 {
     switch (builtin.link_mode) {
         .Static => return &[_][:0]u8{},
         .Dynamic => {},
@@ -833,7 +833,7 @@ pub const ExecvError = std.os.ExecveError || error{OutOfMemory};
 /// This function also uses the PATH environment variable to get the full path to the executable.
 /// Due to the heap-allocation, it is illegal to call this function in a fork() child.
 /// For that use case, use the `std.os` functions directly.
-pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
+pub fn execv(allocator: mem.Allocator, argv: []const []const u8) ExecvError {
     return execve(allocator, argv, null);
 }
 
@@ -846,7 +846,7 @@ pub fn execv(allocator: *mem.Allocator, argv: []const []const u8) ExecvError {
 /// Due to the heap-allocation, it is illegal to call this function in a fork() child.
 /// For that use case, use the `std.os` functions directly.
 pub fn execve(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     argv: []const []const u8,
     env_map: ?*const std.BufMap,
 ) ExecvError {
@@ -854,7 +854,7 @@ pub fn execve(
 
     var arena_allocator = std.heap.ArenaAllocator.init(allocator);
     defer arena_allocator.deinit();
-    const arena = &arena_allocator.allocator;
+    const arena = arena_allocator.getAllocator();
 
     const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
     for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig
index 37b783771f39..ab844fef57dc 100644
--- a/lib/std/special/build_runner.zig
+++ b/lib/std/special/build_runner.zig
@@ -16,7 +16,7 @@ pub fn main() !void {
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
 
-    const allocator = &arena.allocator;
+    const allocator = arena.getAllocator();
     var args = try process.argsAlloc(allocator);
     defer process.argsFree(allocator, args);
 
diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig
index e72204377fef..f90e8aa58ec0 100644
--- a/lib/std/special/test_runner.zig
+++ b/lib/std/special/test_runner.zig
@@ -10,7 +10,7 @@ var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined;
 var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer);
 
 fn processArgs() void {
-    const args = std.process.argsAlloc(&args_allocator.allocator) catch {
+    const args = std.process.argsAlloc(args_allocator.getAllocator()) catch {
         @panic("Too many bytes passed over the CLI to the test runner");
     };
     if (args.len != 2) {
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 8a7fb923de17..3f44b19bc2ba 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -1323,15 +1323,15 @@ pub const Target = struct {
 
     pub const stack_align = 16;
 
-    pub fn zigTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
+    pub fn zigTriple(self: Target, allocator: mem.Allocator) ![]u8 {
         return std.zig.CrossTarget.fromTarget(self).zigTriple(allocator);
     }
 
-    pub fn linuxTripleSimple(allocator: *mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
+    pub fn linuxTripleSimple(allocator: mem.Allocator, cpu_arch: Cpu.Arch, os_tag: Os.Tag, abi: Abi) ![]u8 {
         return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ @tagName(cpu_arch), @tagName(os_tag), @tagName(abi) });
     }
 
-    pub fn linuxTriple(self: Target, allocator: *mem.Allocator) ![]u8 {
+    pub fn linuxTriple(self: Target, allocator: mem.Allocator) ![]u8 {
         return linuxTripleSimple(allocator, self.cpu.arch, self.os.tag, self.abi);
     }
 
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 53fc05f6dbc0..b588abbd8caf 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -7,11 +7,11 @@ const print = std.debug.print;
 pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
 
 /// This should only be used in temporary test programs.
-pub const allocator = &allocator_instance.allocator;
+pub const allocator = allocator_instance.getAllocator();
 pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
 
-pub const failing_allocator = &failing_allocator_instance.allocator;
-pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0);
+pub const failing_allocator = failing_allocator_instance.getAllocator();
+pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.getAllocator(), 0);
 
 pub var base_allocator_instance = std.heap.FixedBufferAllocator.init("");
 
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index d8b243d0fae5..137af925ad69 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -12,10 +12,9 @@ const mem = std.mem;
 /// Then use `failing_allocator` anywhere you would have used a
 /// different allocator.
 pub const FailingAllocator = struct {
-    allocator: mem.Allocator,
     index: usize,
     fail_index: usize,
-    internal_allocator: *mem.Allocator,
+    internal_allocator: mem.Allocator,
     allocated_bytes: usize,
     freed_bytes: usize,
     allocations: usize,
@@ -29,7 +28,7 @@ pub const FailingAllocator = struct {
     /// var a = try failing_alloc.create(i32);
     /// var b = try failing_alloc.create(i32);
     /// testing.expectError(error.OutOfMemory, failing_alloc.create(i32));
-    pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
+    pub fn init(allocator: mem.Allocator, fail_index: usize) FailingAllocator {
         return FailingAllocator{
             .internal_allocator = allocator,
             .fail_index = fail_index,
@@ -38,25 +37,24 @@ pub const FailingAllocator = struct {
             .freed_bytes = 0,
             .allocations = 0,
             .deallocations = 0,
-            .allocator = mem.Allocator{
-                .allocFn = alloc,
-                .resizeFn = resize,
-            },
         };
     }
 
+    pub fn getAllocator(self: *FailingAllocator) mem.Allocator {
+        return mem.Allocator.init(self, alloc, resize);
+    }
+
     fn alloc(
-        allocator: *std.mem.Allocator,
+        self: *FailingAllocator,
         len: usize,
         ptr_align: u29,
         len_align: u29,
         return_address: usize,
     ) error{OutOfMemory}![]u8 {
-        const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
         if (self.index == self.fail_index) {
             return error.OutOfMemory;
         }
-        const result = try self.internal_allocator.allocFn(self.internal_allocator, len, ptr_align, len_align, return_address);
+        const result = try self.internal_allocator.allocFn(self.internal_allocator.ptr, len, ptr_align, len_align, return_address);
         self.allocated_bytes += result.len;
         self.allocations += 1;
         self.index += 1;
@@ -64,15 +62,14 @@ pub const FailingAllocator = struct {
     }
 
     fn resize(
-        allocator: *std.mem.Allocator,
+        self: *FailingAllocator,
         old_mem: []u8,
         old_align: u29,
         new_len: usize,
         len_align: u29,
         ra: usize,
     ) error{OutOfMemory}!usize {
-        const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
-        const r = self.internal_allocator.resizeFn(self.internal_allocator, old_mem, old_align, new_len, len_align, ra) catch |e| {
+        const r = self.internal_allocator.resizeFn(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| {
             std.debug.assert(new_len > old_mem.len);
             return e;
         };
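
This hunk is the template the whole series follows for allocator
implementations: the embedded `allocator: mem.Allocator` field is deleted,
`alloc`/`resize` receive `*FailingAllocator` directly instead of recovering it
with `@fieldParentPtr`, and the interface is produced on demand by
`getAllocator`, which packs the type-erased `self` pointer together with the
two function pointers via `mem.Allocator.init`. A minimal sketch of the same
shape for a hypothetical counting wrapper, assuming the
`ptr`/`allocFn`/`resizeFn` layout used above (all names other than
`std.mem.Allocator` are illustrative):

    const std = @import("std");

    const CountingAllocator = struct {
        child: std.mem.Allocator,
        count: usize = 0,

        pub fn getAllocator(self: *CountingAllocator) std.mem.Allocator {
            // Allocator.init type-erases *CountingAllocator and stores the
            // two function pointers; no @fieldParentPtr dance required.
            return std.mem.Allocator.init(self, alloc, resize);
        }

        fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
            self.count += 1;
            // Forward through the child's stored function pointer, passing
            // its type-erased state pointer explicitly.
            return self.child.allocFn(self.child.ptr, len, ptr_align, len_align, ra);
        }

        fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ra: usize) error{OutOfMemory}!usize {
            return self.child.resizeFn(self.child.ptr, buf, buf_align, new_len, len_align, ra);
        }
    };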
diff --git a/lib/std/unicode.zig b/lib/std/unicode.zig
index 947a1030bb86..0bd7f378326a 100644
--- a/lib/std/unicode.zig
+++ b/lib/std/unicode.zig
@@ -550,7 +550,7 @@ fn testDecode(bytes: []const u8) !u21 {
 }
 
 /// Caller must free returned memory.
-pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
+pub fn utf16leToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) ![]u8 {
     // optimistically guess that it will all be ascii.
     var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
     errdefer result.deinit();
@@ -567,7 +567,7 @@ pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8
 }
 
 /// Caller must free returned memory.
-pub fn utf16leToUtf8AllocZ(allocator: *mem.Allocator, utf16le: []const u16) ![:0]u8 {
+pub fn utf16leToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) ![:0]u8 {
     // optimistically guess that it will all be ascii.
     var result = try std.ArrayList(u8).initCapacity(allocator, utf16le.len);
     errdefer result.deinit();
@@ -661,7 +661,7 @@ test "utf16leToUtf8" {
     }
 }
 
-pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![:0]u16 {
+pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u16 {
     // optimistically guess that it will not require surrogate pairs
     var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1);
     errdefer result.deinit();
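
For pure consumers like these UTF-16 helpers the change is mechanical:
`allocator: *mem.Allocator` becomes `allocator: mem.Allocator` and every call
site is untouched, because `dupe`, `alloc`, `ArrayList.init`, and friends now
hang off the value type. The interface is a small struct (state pointer plus
function pointers), so passing it by value is cheap. The caller-side view, as
a sketch:

    const std = @import("std");

    fn greeting(allocator: std.mem.Allocator) ![]u8 {
        // Identical body to the pre-allocgate version; only the parameter
        // type changed from *std.mem.Allocator to std.mem.Allocator.
        return allocator.dupe(u8, "hello");
    }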
diff --git a/lib/std/wasm.zig b/lib/std/wasm.zig
index f2ae8d34f671..f96c1bc1b9b9 100644
--- a/lib/std/wasm.zig
+++ b/lib/std/wasm.zig
@@ -361,7 +361,7 @@ pub const Type = struct {
             std.mem.eql(Valtype, self.returns, other.returns);
     }
 
-    pub fn deinit(self: *Type, gpa: *std.mem.Allocator) void {
+    pub fn deinit(self: *Type, gpa: std.mem.Allocator) void {
         gpa.free(self.params);
         gpa.free(self.returns);
         self.* = undefined;
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index 56981a74acaf..1420db8ec2fc 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -100,7 +100,7 @@ pub const BinNameOptions = struct {
 };
 
 /// Returns the standard file system basename of a binary generated by the Zig compiler.
-pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
+pub fn binNameAlloc(allocator: std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
     const root_name = options.root_name;
     const target = options.target;
     const ofmt = options.object_format orelse target.getObjectFormat();
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index 02672fbfd1c6..7729805c88e9 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -34,7 +34,7 @@ pub const Location = struct {
     line_end: usize,
 };
 
-pub fn deinit(tree: *Tree, gpa: *mem.Allocator) void {
+pub fn deinit(tree: *Tree, gpa: mem.Allocator) void {
     tree.tokens.deinit(gpa);
     tree.nodes.deinit(gpa);
     gpa.free(tree.extra_data);
@@ -52,7 +52,7 @@ pub const RenderError = error{
 /// for allocating extra stack memory if needed, because this function utilizes recursion.
 /// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
 /// Caller owns the returned slice of bytes, allocated with `gpa`.
-pub fn render(tree: Tree, gpa: *mem.Allocator) RenderError![]u8 {
+pub fn render(tree: Tree, gpa: mem.Allocator) RenderError![]u8 {
     var buffer = std.ArrayList(u8).init(gpa);
     defer buffer.deinit();
 
diff --git a/lib/std/zig/CrossTarget.zig b/lib/std/zig/CrossTarget.zig
index 3c6057a8d902..03bb6bc5ffe6 100644
--- a/lib/std/zig/CrossTarget.zig
+++ b/lib/std/zig/CrossTarget.zig
@@ -520,7 +520,7 @@ pub fn isNative(self: CrossTarget) bool {
     return self.isNativeCpu() and self.isNativeOs() and self.isNativeAbi();
 }
 
-pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory}![]u8 {
+pub fn zigTriple(self: CrossTarget, allocator: mem.Allocator) error{OutOfMemory}![]u8 {
     if (self.isNative()) {
         return allocator.dupe(u8, "native");
     }
@@ -559,13 +559,13 @@ pub fn zigTriple(self: CrossTarget, allocator: *mem.Allocator) error{OutOfMemory
     return result.toOwnedSlice();
 }
 
-pub fn allocDescription(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
+pub fn allocDescription(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
     // TODO is there anything else worthy of the description that is not
     // already captured in the triple?
     return self.zigTriple(allocator);
 }
 
-pub fn linuxTriple(self: CrossTarget, allocator: *mem.Allocator) ![]u8 {
+pub fn linuxTriple(self: CrossTarget, allocator: mem.Allocator) ![]u8 {
     return Target.linuxTripleSimple(allocator, self.getCpuArch(), self.getOsTag(), self.getAbi());
 }
 
@@ -576,7 +576,7 @@ pub fn wantSharedLibSymLinks(self: CrossTarget) bool {
 pub const VcpkgLinkage = std.builtin.LinkMode;
 
 /// Returned slice must be freed by the caller.
-pub fn vcpkgTriplet(self: CrossTarget, allocator: *mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
+pub fn vcpkgTriplet(self: CrossTarget, allocator: mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
     const arch = switch (self.getCpuArch()) {
         .i386 => "x86",
         .x86_64 => "x64",
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
index 28a0c1a19635..89abb3500666 100644
--- a/lib/std/zig/parse.zig
+++ b/lib/std/zig/parse.zig
@@ -11,7 +11,7 @@ pub const Error = error{ParseError} || Allocator.Error;
 
 /// Result should be freed with tree.deinit() when there are
 /// no more references to any of the tokens or nodes.
-pub fn parse(gpa: *Allocator, source: [:0]const u8) Allocator.Error!Ast {
+pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
     var tokens = Ast.TokenList{};
     defer tokens.deinit(gpa);
 
@@ -81,7 +81,7 @@ const null_node: Node.Index = 0;
 
 /// Represents in-progress parsing, will be converted to an Ast after completion.
 const Parser = struct {
-    gpa: *Allocator,
+    gpa: Allocator,
     source: []const u8,
     token_tags: []const Token.Tag,
     token_starts: []const Ast.ByteOffset,
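
Allocators stored in structs migrate the same way: `Parser.gpa` is now an
`Allocator` held by value. The value still embeds a pointer to the concrete
allocator's state, so the lifetime contract is unchanged from the old
`*Allocator` field: whatever produced the interface must outlive the struct
holding it. Sketch with hypothetical names:

    const std = @import("std");

    const Widget = struct {
        gpa: std.mem.Allocator, // interface stored by value
        data: std.ArrayListUnmanaged(u32) = .{},

        fn deinit(self: *Widget) void {
            // Unmanaged containers take the interface at each call site.
            self.data.deinit(self.gpa);
        }
    };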
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index fb1a8120d4de..0fb435791762 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -1220,7 +1220,7 @@ test "zig fmt: doc comments on param decl" {
     try testCanonical(
         \\pub const Allocator = struct {
         \\    shrinkFn: fn (
-        \\        self: *Allocator,
+        \\        self: Allocator,
         \\        /// Guaranteed to be the same as what was returned from most recent call to
         \\        /// `allocFn`, `reallocFn`, or `shrinkFn`.
         \\        old_mem: []u8,
@@ -4250,7 +4250,7 @@ test "zig fmt: Only indent multiline string literals in function calls" {
 
 test "zig fmt: Don't add extra newline after if" {
     try testCanonical(
-        \\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+        \\pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
         \\    if (cwd().symLink(existing_path, new_path, .{})) {
         \\        return;
         \\    }
@@ -5319,7 +5319,7 @@ const maxInt = std.math.maxInt;
 
 var fixed_buffer_mem: [100 * 1024]u8 = undefined;
 
-fn testParse(source: [:0]const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
+fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
     const stderr = io.getStdErr().writer();
 
     var tree = try std.zig.parse(allocator, source);
@@ -5351,9 +5351,10 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
     const needed_alloc_count = x: {
         // Try it once with unlimited memory, make sure it works
         var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
-        var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
+        var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), maxInt(usize));
+        const allocator = failing_allocator.getAllocator();
         var anything_changed: bool = undefined;
-        const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
+        const result_source = try testParse(source, allocator, &anything_changed);
         try std.testing.expectEqualStrings(expected_source, result_source);
         const changes_expected = source.ptr != expected_source.ptr;
         if (anything_changed != changes_expected) {
@@ -5361,16 +5362,16 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
             return error.TestFailed;
         }
         try std.testing.expect(anything_changed == changes_expected);
-        failing_allocator.allocator.free(result_source);
+        allocator.free(result_source);
         break :x failing_allocator.index;
     };
 
     var fail_index: usize = 0;
     while (fail_index < needed_alloc_count) : (fail_index += 1) {
         var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
-        var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
+        var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), fail_index);
         var anything_changed: bool = undefined;
-        if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
+        if (testParse(source, failing_allocator.getAllocator(), &anything_changed)) |_| {
             return error.NondeterministicMemoryUsage;
         } else |err| switch (err) {
             error.OutOfMemory => {
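
Note the hoisted `const allocator = failing_allocator.getAllocator();`:
because `getAllocator` captures `&failing_allocator`, the interface value has
to be created after the FailingAllocator reaches its final stack address, and
the same value is then reused for both the parse and the `free`. The
OOM-injection loop, sketched under those assumptions with a placeholder
workload:

    const std = @import("std");

    fn doWork(a: std.mem.Allocator) error{OutOfMemory}!void {
        const tmp = try a.alloc(u8, 1024); // placeholder allocation
        a.free(tmp);
    }

    test "deterministic OOM injection" {
        var buf: [4096]u8 = undefined;
        var fail_index: usize = 0;
        while (fail_index < 4) : (fail_index += 1) {
            var fixed = std.heap.FixedBufferAllocator.init(&buf);
            var failing = std.testing.FailingAllocator.init(fixed.getAllocator(), fail_index);
            // Created once per iteration, after `failing` stops moving.
            const a = failing.getAllocator();
            doWork(a) catch |err| switch (err) {
                error.OutOfMemory => continue, // the injected failure
            };
        }
    }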
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index b6f513cc0a54..d2286914b048 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -33,7 +33,7 @@ pub fn main() !void {
 
 fn testOnce() usize {
     var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
-    var allocator = &fixed_buf_alloc.allocator;
+    var allocator = fixed_buf_alloc.getAllocator();
     _ = std.zig.parse(allocator, source) catch @panic("parse failure");
     return fixed_buf_alloc.end_index;
 }
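
Same lifetime rule in miniature: the `allocator` value embeds
`&fixed_buf_alloc`, so it is only valid while `fixed_buf_alloc` stays in
place, and reading `fixed_buf_alloc.end_index` afterwards works because all
state lives in the FixedBufferAllocator, not in the interface value. A
self-contained sketch:

    const std = @import("std");

    test "state lives in the backing allocator" {
        var buf: [256]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buf);
        const a = fba.getAllocator();
        _ = try a.alloc(u8, 100);
        // The interface value holds no state of its own.
        try std.testing.expect(fba.end_index >= 100);
    }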
diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig
index 8a909bf562aa..a703e1f3ea5f 100644
--- a/lib/std/zig/render.zig
+++ b/lib/std/zig/render.zig
@@ -37,7 +37,7 @@ pub fn renderTree(buffer: *std.ArrayList(u8), tree: Ast) Error!void {
 }
 
 /// Render all members in the given slice, keeping empty lines where appropriate
-fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
+fn renderMembers(gpa: Allocator, ais: *Ais, tree: Ast, members: []const Ast.Node.Index) Error!void {
     if (members.len == 0) return;
     try renderMember(gpa, ais, tree, members[0], .newline);
     for (members[1..]) |member| {
@@ -46,7 +46,7 @@ fn renderMembers(gpa: *Allocator, ais: *Ais, tree: Ast, members: []const Ast.Nod
     }
 }
 
-fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
+fn renderMember(gpa: Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const main_tokens = tree.nodes.items(.main_token);
     const datas = tree.nodes.items(.data);
@@ -168,7 +168,7 @@ fn renderMember(gpa: *Allocator, ais: *Ais, tree: Ast, decl: Ast.Node.Index, spa
 }
 
 /// Render all expressions in the slice, keeping empty lines where appropriate
-fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
+fn renderExpressions(gpa: Allocator, ais: *Ais, tree: Ast, expressions: []const Ast.Node.Index, space: Space) Error!void {
     if (expressions.len == 0) return;
     try renderExpression(gpa, ais, tree, expressions[0], space);
     for (expressions[1..]) |expression| {
@@ -177,7 +177,7 @@ fn renderExpressions(gpa: *Allocator, ais: *Ais, tree: Ast, expressions: []const
     }
 }
 
-fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpression(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const main_tokens = tree.nodes.items(.main_token);
     const node_tags = tree.nodes.items(.tag);
@@ -710,7 +710,7 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index,
 }
 
 fn renderArrayType(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     array_type: Ast.full.ArrayType,
@@ -732,7 +732,7 @@ fn renderArrayType(
 }
 
 fn renderPtrType(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     ptr_type: Ast.full.PtrType,
@@ -825,7 +825,7 @@ fn renderPtrType(
 }
 
 fn renderSlice(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     slice_node: Ast.Node.Index,
@@ -861,7 +861,7 @@ fn renderSlice(
 }
 
 fn renderAsmOutput(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     asm_output: Ast.Node.Index,
@@ -891,7 +891,7 @@ fn renderAsmOutput(
 }
 
 fn renderAsmInput(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     asm_input: Ast.Node.Index,
@@ -912,7 +912,7 @@ fn renderAsmInput(
     return renderToken(ais, tree, datas[asm_input].rhs, space); // rparen
 }
 
-fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
+fn renderVarDecl(gpa: Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDecl) Error!void {
     if (var_decl.visib_token) |visib_token| {
         try renderToken(ais, tree, visib_token, Space.space); // pub
     }
@@ -1019,7 +1019,7 @@ fn renderVarDecl(gpa: *Allocator, ais: *Ais, tree: Ast, var_decl: Ast.full.VarDe
     return renderToken(ais, tree, var_decl.ast.mut_token + 2, .newline); // ;
 }
 
-fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
+fn renderIf(gpa: Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space: Space) Error!void {
     return renderWhile(gpa, ais, tree, .{
         .ast = .{
             .while_token = if_node.ast.if_token,
@@ -1038,7 +1038,7 @@ fn renderIf(gpa: *Allocator, ais: *Ais, tree: Ast, if_node: Ast.full.If, space:
 
 /// Note that this function is additionally used to render if and for expressions, with
 /// respective values set to null.
-fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
+fn renderWhile(gpa: Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While, space: Space) Error!void {
     const node_tags = tree.nodes.items(.tag);
     const token_tags = tree.tokens.items(.tag);
 
@@ -1141,7 +1141,7 @@ fn renderWhile(gpa: *Allocator, ais: *Ais, tree: Ast, while_node: Ast.full.While
 }
 
 fn renderContainerField(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     field: Ast.full.ContainerField,
@@ -1215,7 +1215,7 @@ fn renderContainerField(
 }
 
 fn renderBuiltinCall(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     builtin_token: Ast.TokenIndex,
@@ -1272,7 +1272,7 @@ fn renderBuiltinCall(
     }
 }
 
-fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
+fn renderFnProto(gpa: Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnProto, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const token_starts = tree.tokens.items(.start);
 
@@ -1488,7 +1488,7 @@ fn renderFnProto(gpa: *Allocator, ais: *Ais, tree: Ast, fn_proto: Ast.full.FnPro
 }
 
 fn renderSwitchCase(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     switch_case: Ast.full.SwitchCase,
@@ -1541,7 +1541,7 @@ fn renderSwitchCase(
 }
 
 fn renderBlock(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     block_node: Ast.Node.Index,
@@ -1581,7 +1581,7 @@ fn renderBlock(
 }
 
 fn renderStructInit(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     struct_node: Ast.Node.Index,
@@ -1640,7 +1640,7 @@ fn renderStructInit(
 }
 
 fn renderArrayInit(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     array_init: Ast.full.ArrayInit,
@@ -1859,7 +1859,7 @@ fn renderArrayInit(
 }
 
 fn renderContainerDecl(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     container_decl_node: Ast.Node.Index,
@@ -1956,7 +1956,7 @@ fn renderContainerDecl(
 }
 
 fn renderAsm(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     asm_node: Ast.full.Asm,
@@ -2105,7 +2105,7 @@ fn renderAsm(
 }
 
 fn renderCall(
-    gpa: *Allocator,
+    gpa: Allocator,
     ais: *Ais,
     tree: Ast,
     call: Ast.full.Call,
@@ -2180,7 +2180,7 @@ fn renderCall(
 
 /// Renders the given expression indented, popping the indent before rendering
 /// any following line comments
-fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionIndented(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
     const token_starts = tree.tokens.items(.start);
     const token_tags = tree.tokens.items(.tag);
 
@@ -2238,7 +2238,7 @@ fn renderExpressionIndented(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Nod
 
 /// Render an expression, and the comma that follows it, if it is present in the source.
 /// If a comma is present, and `space` is `Space.comma`, render only a single comma.
-fn renderExpressionComma(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
+fn renderExpressionComma(gpa: Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, space: Space) Error!void {
     const token_tags = tree.tokens.items(.tag);
     const maybe_comma = tree.lastToken(node) + 1;
     if (token_tags[maybe_comma] == .comma and space != .comma) {
diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig
index 2a38195b1f6f..5e44e5f8f3a0 100644
--- a/lib/std/zig/string_literal.zig
+++ b/lib/std/zig/string_literal.zig
@@ -131,7 +131,7 @@ pub fn parseAppend(buf: *std.ArrayList(u8), bytes: []const u8) error{OutOfMemory
 
 /// Higher level API. Does not return extra info about parse errors.
 /// Caller owns returned memory.
-pub fn parseAlloc(allocator: *std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
+pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
     var buf = std.ArrayList(u8).init(allocator);
     defer buf.deinit();
 
@@ -147,7 +147,7 @@ test "parse" {
 
     var fixed_buf_mem: [32]u8 = undefined;
     var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]);
-    var alloc = &fixed_buf_alloc.allocator;
+    var alloc = fixed_buf_alloc.getAllocator();
 
     try expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\"")));
     try expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\"")));
diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig
index 353ad2509682..5ba0d8198c2a 100644
--- a/lib/std/zig/system.zig
+++ b/lib/std/zig/system.zig
@@ -21,7 +21,7 @@ pub const NativePaths = struct {
     rpaths: ArrayList([:0]u8),
     warnings: ArrayList([:0]u8),
 
-    pub fn detect(allocator: *Allocator, native_info: NativeTargetInfo) !NativePaths {
+    pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths {
         const native_target = native_info.target;
 
         var self: NativePaths = .{
@@ -237,7 +237,7 @@ pub const NativeTargetInfo = struct {
     /// Any resources this function allocates are released before returning, and so there is no
     /// deinitialization method.
     /// TODO Remove the Allocator requirement from this function.
-    pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
+    pub fn detect(allocator: Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
         var os = cross_target.getOsTag().defaultVersionRange(cross_target.getCpuArch());
         if (cross_target.os_tag == null) {
             switch (builtin.target.os.tag) {
@@ -441,7 +441,7 @@ pub const NativeTargetInfo = struct {
     /// we fall back to the defaults.
     /// TODO Remove the Allocator requirement from this function.
     fn detectAbiAndDynamicLinker(
-        allocator: *Allocator,
+        allocator: Allocator,
         cpu: Target.Cpu,
         os: Target.Os,
         cross_target: CrossTarget,
diff --git a/lib/std/zig/system/darwin.zig b/lib/std/zig/system/darwin.zig
index 5ce769a79210..c20607440d2e 100644
--- a/lib/std/zig/system/darwin.zig
+++ b/lib/std/zig/system/darwin.zig
@@ -11,7 +11,7 @@ pub const macos = @import("darwin/macos.zig");
 /// Therefore, we resort to the same tool used by Homebrew, namely, invoking `xcode-select --print-path`
 /// and checking if the status is nonzero or the returned string is nonempty.
 /// https://github.com/Homebrew/brew/blob/e119bdc571dcb000305411bc1e26678b132afb98/Library/Homebrew/brew.sh#L630
-pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
+pub fn isDarwinSDKInstalled(allocator: Allocator) bool {
     const argv = &[_][]const u8{ "/usr/bin/xcode-select", "--print-path" };
     const result = std.ChildProcess.exec(.{ .allocator = allocator, .argv = argv }) catch return false;
     defer {
@@ -29,7 +29,7 @@ pub fn isDarwinSDKInstalled(allocator: *Allocator) bool {
 /// Calls `xcrun --sdk  --show-sdk-path` which fetches the path to the SDK sysroot (if any).
 /// Subsequently calls `xcrun --sdk  --show-sdk-version` which fetches version of the SDK.
 /// The caller needs to deinit the resulting struct.
-pub fn getDarwinSDK(allocator: *Allocator, target: Target) ?DarwinSDK {
+pub fn getDarwinSDK(allocator: Allocator, target: Target) ?DarwinSDK {
     const is_simulator_abi = target.abi == .simulator;
     const sdk = switch (target.os.tag) {
         .macos => "macosx",
@@ -82,7 +82,7 @@ pub const DarwinSDK = struct {
     path: []const u8,
     version: Version,
 
-    pub fn deinit(self: DarwinSDK, allocator: *Allocator) void {
+    pub fn deinit(self: DarwinSDK, allocator: Allocator) void {
         allocator.free(self.path);
     }
 };
diff --git a/src/Air.zig b/src/Air.zig
index 2a6da5a4314f..0e8a63acb127 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -841,7 +841,7 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
     };
 }
 
-pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void {
+pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
     air.instructions.deinit(gpa);
     gpa.free(air.extra);
     gpa.free(air.values);
diff --git a/src/AstGen.zig b/src/AstGen.zig
index c7fe1e78723a..5cc7f8ef6581 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -16,7 +16,7 @@ const indexToRef = Zir.indexToRef;
 const trace = @import("tracy.zig").trace;
 const BuiltinFn = @import("BuiltinFn.zig");
 
-gpa: *Allocator,
+gpa: Allocator,
 tree: *const Ast,
 instructions: std.MultiArrayList(Zir.Inst) = .{},
 extra: ArrayListUnmanaged(u32) = .{},
@@ -33,7 +33,7 @@ source_line: u32 = 0,
 source_column: u32 = 0,
 /// Used for temporary allocations; freed after AstGen is complete.
 /// The resulting ZIR code has no references to anything in this arena.
-arena: *Allocator,
+arena: Allocator,
 string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
 compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
 /// The topmost block of the current function.
@@ -92,7 +92,7 @@ fn appendRefsAssumeCapacity(astgen: *AstGen, refs: []const Zir.Inst.Ref) void {
     astgen.extra.appendSliceAssumeCapacity(coerced);
 }
 
-pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
+pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
     var arena = std.heap.ArenaAllocator.init(gpa);
     defer arena.deinit();
 
@@ -196,7 +196,7 @@ pub fn generate(gpa: *Allocator, tree: Ast) Allocator.Error!Zir {
     };
 }
 
-pub fn deinit(astgen: *AstGen, gpa: *Allocator) void {
+pub fn deinit(astgen: *AstGen, gpa: Allocator) void {
     astgen.instructions.deinit(gpa);
     astgen.extra.deinit(gpa);
     astgen.string_table.deinit(gpa);
@@ -2460,7 +2460,7 @@ fn makeDeferScope(
     astgen: *AstGen,
     scope: *Scope,
     node: Ast.Node.Index,
-    block_arena: *Allocator,
+    block_arena: Allocator,
     scope_tag: Scope.Tag,
 ) InnerError!*Scope {
     const tree = astgen.tree;
@@ -2486,7 +2486,7 @@ fn varDecl(
     gz: *GenZir,
     scope: *Scope,
     node: Ast.Node.Index,
-    block_arena: *Allocator,
+    block_arena: Allocator,
     var_decl: Ast.full.VarDecl,
 ) InnerError!*Scope {
     try emitDbgNode(gz, node);
@@ -3030,7 +3030,7 @@ const WipMembers = struct {
     /// (4 for src_hash + line + name + value + align + link_section + address_space)
     const max_decl_size = 10;
 
-    pub fn init(gpa: *Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
+    pub fn init(gpa: Allocator, payload: *ArrayListUnmanaged(u32), decl_count: u32, field_count: u32, comptime bits_per_field: u32, comptime max_field_size: u32) Allocator.Error!Self {
         const payload_top = @intCast(u32, payload.items.len);
         const decls_start = payload_top + (decl_count + decls_per_u32 - 1) / decls_per_u32;
         const field_bits_start = decls_start + decl_count * max_decl_size;
@@ -6178,7 +6178,7 @@ fn tunnelThroughClosure(
     ns: ?*Scope.Namespace,
     value: Zir.Inst.Ref,
     token: Ast.TokenIndex,
-    gpa: *Allocator,
+    gpa: Allocator,
 ) !Zir.Inst.Ref {
     // For trivial values, we don't need a tunnel.
     // Just return the ref.
@@ -8806,7 +8806,7 @@ const Scope = struct {
         /// ref of the capture for decls in this namespace
         captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
 
-        pub fn deinit(self: *Namespace, gpa: *Allocator) void {
+        pub fn deinit(self: *Namespace, gpa: Allocator) void {
             self.decls.deinit(gpa);
             self.captures.deinit(gpa);
             self.* = undefined;
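
AstGen shows how allocators compose after the refactor: `gpa` and `arena` are
both plain `Allocator` fields, `std.heap.ArenaAllocator.init(gpa)` receives
the parent interface by value, and the child interface comes from the arena's
accessor (named `getAllocator` at this point in the series) instead of
`&arena.allocator`. Sketch under that naming assumption:

    const std = @import("std");

    fn withScratchArena(gpa: std.mem.Allocator) !void {
        var arena_state = std.heap.ArenaAllocator.init(gpa);
        defer arena_state.deinit();
        // Previously: const arena = &arena_state.allocator;
        const arena = arena_state.getAllocator();

        // Temporary allocations come from the arena and are freed in bulk.
        _ = try arena.alloc(u8, 64);
    }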
diff --git a/src/Cache.zig b/src/Cache.zig
index fe769cb32a41..1362f6d81620 100644
--- a/src/Cache.zig
+++ b/src/Cache.zig
@@ -1,4 +1,4 @@
-gpa: *Allocator,
+gpa: Allocator,
 manifest_dir: fs.Dir,
 hash: HashHelper = .{},
 
@@ -48,7 +48,7 @@ pub const File = struct {
     bin_digest: BinDigest,
     contents: ?[]const u8,
 
-    pub fn deinit(self: *File, allocator: *Allocator) void {
+    pub fn deinit(self: *File, allocator: Allocator) void {
         if (self.path) |owned_slice| {
             allocator.free(owned_slice);
             self.path = null;
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 29d101e61ede..f6ee58b5efef 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -36,7 +36,7 @@ const libtsan = @import("libtsan.zig");
 const Zir = @import("Zir.zig");
 
 /// General-purpose allocator. Used for both temporary and long-term storage.
-gpa: *Allocator,
+gpa: Allocator,
 /// Arena-allocated memory used during initialization. Should be untouched until deinit.
 arena_state: std.heap.ArenaAllocator.State,
 bin_file: *link.File,
@@ -164,7 +164,7 @@ pub const CRTFile = struct {
     lock: Cache.Lock,
     full_object_path: []const u8,
 
-    fn deinit(self: *CRTFile, gpa: *Allocator) void {
+    fn deinit(self: *CRTFile, gpa: Allocator) void {
         self.lock.release();
         gpa.free(self.full_object_path);
         self.* = undefined;
@@ -253,14 +253,14 @@ pub const CObject = struct {
         line: u32,
         column: u32,
 
-        pub fn destroy(em: *ErrorMsg, gpa: *Allocator) void {
+        pub fn destroy(em: *ErrorMsg, gpa: Allocator) void {
             gpa.free(em.msg);
             gpa.destroy(em);
         }
     };
 
     /// Returns if there was failure.
-    pub fn clearStatus(self: *CObject, gpa: *Allocator) bool {
+    pub fn clearStatus(self: *CObject, gpa: Allocator) bool {
         switch (self.status) {
             .new => return false,
             .failure, .failure_retryable => {
@@ -276,7 +276,7 @@ pub const CObject = struct {
         }
     }
 
-    pub fn destroy(self: *CObject, gpa: *Allocator) void {
+    pub fn destroy(self: *CObject, gpa: Allocator) void {
         _ = self.clearStatus(gpa);
         gpa.destroy(self);
     }
@@ -305,7 +305,7 @@ pub const MiscError = struct {
     msg: []u8,
     children: ?AllErrors = null,
 
-    pub fn deinit(misc_err: *MiscError, gpa: *Allocator) void {
+    pub fn deinit(misc_err: *MiscError, gpa: Allocator) void {
         gpa.free(misc_err.msg);
         if (misc_err.children) |*children| {
             children.deinit(gpa);
@@ -402,7 +402,7 @@ pub const AllErrors = struct {
         }
     };
 
-    pub fn deinit(self: *AllErrors, gpa: *Allocator) void {
+    pub fn deinit(self: *AllErrors, gpa: Allocator) void {
         self.arena.promote(gpa).deinit();
     }
 
@@ -456,7 +456,7 @@ pub const AllErrors = struct {
     }
 
     pub fn addZir(
-        arena: *Allocator,
+        arena: Allocator,
         errors: *std.ArrayList(Message),
         file: *Module.File,
     ) !void {
@@ -559,7 +559,7 @@ pub const AllErrors = struct {
         }
     }
 
-    fn dupeList(list: []const Message, arena: *Allocator) Allocator.Error![]Message {
+    fn dupeList(list: []const Message, arena: Allocator) Allocator.Error![]Message {
         const duped_list = try arena.alloc(Message, list.len);
         for (list) |item, i| {
             duped_list[i] = switch (item) {
@@ -589,7 +589,7 @@ pub const Directory = struct {
     path: ?[]const u8,
     handle: std.fs.Dir,
 
-    pub fn join(self: Directory, allocator: *Allocator, paths: []const []const u8) ![]u8 {
+    pub fn join(self: Directory, allocator: Allocator, paths: []const []const u8) ![]u8 {
         if (self.path) |p| {
             // TODO clean way to do this with only 1 allocation
             const part2 = try std.fs.path.join(allocator, paths);
@@ -600,7 +600,7 @@ pub const Directory = struct {
         }
     }
 
-    pub fn joinZ(self: Directory, allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
+    pub fn joinZ(self: Directory, allocator: Allocator, paths: []const []const u8) ![:0]u8 {
         if (self.path) |p| {
             // TODO clean way to do this with only 1 allocation
             const part2 = try std.fs.path.join(allocator, paths);
@@ -829,7 +829,7 @@ fn addPackageTableToCacheHash(
     }
 }
 
-pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
+pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
     const is_dyn_lib = switch (options.output_mode) {
         .Obj, .Exe => false,
         .Lib => (options.link_mode orelse .Static) == .Dynamic,
@@ -3263,7 +3263,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
     };
 }
 
-pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
+pub fn tmpFilePath(comp: *Compilation, arena: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
     const s = std.fs.path.sep_str;
     const rand_int = std.crypto.random.int(u64);
     if (comp.local_cache_directory.path) |p| {
@@ -3275,7 +3275,7 @@ pub fn tmpFilePath(comp: *Compilation, arena: *Allocator, suffix: []const u8) er
 
 pub fn addTranslateCCArgs(
     comp: *Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     argv: *std.ArrayList([]const u8),
     ext: FileExt,
     out_dep_path: ?[]const u8,
@@ -3289,7 +3289,7 @@ pub fn addTranslateCCArgs(
 /// Add common C compiler args between translate-c and C object compilation.
 pub fn addCCArgs(
     comp: *const Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     argv: *std.ArrayList([]const u8),
     ext: FileExt,
     out_dep_path: ?[]const u8,
@@ -3776,7 +3776,7 @@ const LibCDirs = struct {
     libc_installation: ?*const LibCInstallation,
 };
 
-fn getZigShippedLibCIncludeDirsDarwin(arena: *Allocator, zig_lib_dir: []const u8, target: Target) !LibCDirs {
+fn getZigShippedLibCIncludeDirsDarwin(arena: Allocator, zig_lib_dir: []const u8, target: Target) !LibCDirs {
     const arch_name = @tagName(target.cpu.arch);
     const os_name = try std.fmt.allocPrint(arena, "{s}.{d}", .{
         @tagName(target.os.tag),
@@ -3808,7 +3808,7 @@ fn getZigShippedLibCIncludeDirsDarwin(arena: *Allocator, zig_lib_dir: []const u8
 }
 
 fn detectLibCIncludeDirs(
-    arena: *Allocator,
+    arena: Allocator,
     zig_lib_dir: []const u8,
     target: Target,
     is_native_abi: bool,
@@ -3933,7 +3933,7 @@ fn detectLibCIncludeDirs(
     };
 }
 
-fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
+fn detectLibCFromLibCInstallation(arena: Allocator, target: Target, lci: *const LibCInstallation) !LibCDirs {
     var list = try std.ArrayList([]const u8).initCapacity(arena, 4);
 
     list.appendAssumeCapacity(lci.include_dir.?);
@@ -3965,7 +3965,7 @@ fn detectLibCFromLibCInstallation(arena: *Allocator, target: Target, lci: *const
     };
 }
 
-pub fn get_libc_crt_file(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
+pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
     if (comp.wantBuildGLibCFromSource() or
         comp.wantBuildMuslFromSource() or
         comp.wantBuildMinGWFromSource() or
@@ -4066,7 +4066,7 @@ pub fn dump_argv(argv: []const []const u8) void {
     std.debug.print("{s}\n", .{argv[argv.len - 1]});
 }
 
-pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) Allocator.Error![]u8 {
+pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Allocator.Error![]u8 {
     const t = trace(@src());
     defer t.end();
 
@@ -4717,14 +4717,14 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
     comp.stage1_lock = man.toOwnedLock();
 }
 
-fn stage1LocPath(arena: *Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 {
+fn stage1LocPath(arena: Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 {
     const loc = opt_loc orelse return "";
     const directory = loc.directory orelse cache_directory;
     return directory.join(arena, &[_][]const u8{loc.basename});
 }
 
 fn createStage1Pkg(
-    arena: *Allocator,
+    arena: Allocator,
     name: []const u8,
     pkg: *Package,
     parent_pkg: ?*stage1.Pkg,
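
Throughout Compilation.zig the parameter names continue to carry the memory
contract: `gpa` for long-lived allocations that are freed individually, and
`arena` for allocations that live until the arena is torn down in bulk. Since
both are now the same `Allocator` value type, that distinction is enforced
purely by convention and documentation, exactly as it was with `*Allocator`.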
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 24e4a16b1c02..f090329d5a65 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -51,7 +51,7 @@ pub const SwitchBr = struct {
     else_death_count: u32,
 };
 
-pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness {
+pub fn analyze(gpa: Allocator, air: Air, zir: Zir) Allocator.Error!Liveness {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -136,7 +136,7 @@ pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices {
     };
 }
 
-pub fn deinit(l: *Liveness, gpa: *Allocator) void {
+pub fn deinit(l: *Liveness, gpa: Allocator) void {
     gpa.free(l.tomb_bits);
     gpa.free(l.extra);
     l.special.deinit(gpa);
@@ -150,7 +150,7 @@ pub const OperandInt = std.math.Log2Int(Bpi);
 
 /// In-progress data; on successful analysis converted into `Liveness`.
 const Analysis = struct {
-    gpa: *Allocator,
+    gpa: Allocator,
     air: Air,
     table: std.AutoHashMapUnmanaged(Air.Inst.Index, void),
     tomb_bits: []usize,
diff --git a/src/Module.zig b/src/Module.zig
index 805d02b65047..a40dcd14198e 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -30,7 +30,7 @@ const target_util = @import("target.zig");
 const build_options = @import("build_options");
 
 /// General-purpose allocator. Used for both temporary and long-term storage.
-gpa: *Allocator,
+gpa: Allocator,
 comp: *Compilation,
 
 /// Where our incremental compilation metadata serialization will go.
@@ -299,10 +299,10 @@ pub const CaptureScope = struct {
 pub const WipCaptureScope = struct {
     scope: *CaptureScope,
     finalized: bool,
-    gpa: *Allocator,
-    perm_arena: *Allocator,
+    gpa: Allocator,
+    perm_arena: Allocator,
 
-    pub fn init(gpa: *Allocator, perm_arena: *Allocator, parent: ?*CaptureScope) !@This() {
+    pub fn init(gpa: Allocator, perm_arena: Allocator, parent: ?*CaptureScope) !@This() {
         const scope = try perm_arena.create(CaptureScope);
         scope.* = .{ .parent = parent };
         return @This(){
@@ -469,7 +469,7 @@ pub const Decl = struct {
 
     pub const DepsTable = std.AutoArrayHashMapUnmanaged(*Decl, void);
 
-    pub fn clearName(decl: *Decl, gpa: *Allocator) void {
+    pub fn clearName(decl: *Decl, gpa: Allocator) void {
         gpa.free(mem.sliceTo(decl.name, 0));
         decl.name = undefined;
     }
@@ -499,7 +499,7 @@ pub const Decl = struct {
         }
     }
 
-    pub fn clearValues(decl: *Decl, gpa: *Allocator) void {
+    pub fn clearValues(decl: *Decl, gpa: Allocator) void {
         if (decl.getFunction()) |func| {
             func.deinit(gpa);
             gpa.destroy(func);
@@ -636,7 +636,7 @@ pub const Decl = struct {
         return decl.src_namespace.renderFullyQualifiedDebugName(unqualified_name, writer);
     }
 
-    pub fn getFullyQualifiedName(decl: Decl, gpa: *Allocator) ![:0]u8 {
+    pub fn getFullyQualifiedName(decl: Decl, gpa: Allocator) ![:0]u8 {
         var buffer = std.ArrayList(u8).init(gpa);
         defer buffer.deinit();
         try decl.renderFullyQualifiedName(buffer.writer());
@@ -855,7 +855,7 @@ pub const Struct = struct {
         is_comptime: bool,
     };
 
-    pub fn getFullyQualifiedName(s: *Struct, gpa: *Allocator) ![:0]u8 {
+    pub fn getFullyQualifiedName(s: *Struct, gpa: Allocator) ![:0]u8 {
         return s.owner_decl.getFullyQualifiedName(gpa);
     }
 
@@ -999,7 +999,7 @@ pub const Union = struct {
 
     pub const Fields = std.StringArrayHashMapUnmanaged(Field);
 
-    pub fn getFullyQualifiedName(s: *Union, gpa: *Allocator) ![:0]u8 {
+    pub fn getFullyQualifiedName(s: *Union, gpa: Allocator) ![:0]u8 {
         return s.owner_decl.getFullyQualifiedName(gpa);
     }
 
@@ -1178,7 +1178,7 @@ pub const Opaque = struct {
         };
     }
 
-    pub fn getFullyQualifiedName(s: *Opaque, gpa: *Allocator) ![:0]u8 {
+    pub fn getFullyQualifiedName(s: *Opaque, gpa: Allocator) ![:0]u8 {
         return s.owner_decl.getFullyQualifiedName(gpa);
     }
 };
@@ -1225,7 +1225,7 @@ pub const Fn = struct {
         success,
     };
 
-    pub fn deinit(func: *Fn, gpa: *Allocator) void {
+    pub fn deinit(func: *Fn, gpa: Allocator) void {
         if (func.getInferredErrorSet()) |map| {
             map.deinit(gpa);
         }
@@ -1422,27 +1422,27 @@ pub const File = struct {
     /// successful, this field is unloaded.
     prev_zir: ?*Zir = null,
 
-    pub fn unload(file: *File, gpa: *Allocator) void {
+    pub fn unload(file: *File, gpa: Allocator) void {
         file.unloadTree(gpa);
         file.unloadSource(gpa);
         file.unloadZir(gpa);
     }
 
-    pub fn unloadTree(file: *File, gpa: *Allocator) void {
+    pub fn unloadTree(file: *File, gpa: Allocator) void {
         if (file.tree_loaded) {
             file.tree_loaded = false;
             file.tree.deinit(gpa);
         }
     }
 
-    pub fn unloadSource(file: *File, gpa: *Allocator) void {
+    pub fn unloadSource(file: *File, gpa: Allocator) void {
         if (file.source_loaded) {
             file.source_loaded = false;
             gpa.free(file.source);
         }
     }
 
-    pub fn unloadZir(file: *File, gpa: *Allocator) void {
+    pub fn unloadZir(file: *File, gpa: Allocator) void {
         if (file.zir_loaded) {
             file.zir_loaded = false;
             file.zir.deinit(gpa);
@@ -1466,7 +1466,7 @@ pub const File = struct {
         file.* = undefined;
     }
 
-    pub fn getSource(file: *File, gpa: *Allocator) ![:0]const u8 {
+    pub fn getSource(file: *File, gpa: Allocator) ![:0]const u8 {
         if (file.source_loaded) return file.source;
 
         const root_dir_path = file.pkg.root_src_directory.path orelse ".";
@@ -1499,7 +1499,7 @@ pub const File = struct {
         return source;
     }
 
-    pub fn getTree(file: *File, gpa: *Allocator) !*const Ast {
+    pub fn getTree(file: *File, gpa: Allocator) !*const Ast {
         if (file.tree_loaded) return &file.tree;
 
         const source = try file.getSource(gpa);
@@ -1531,7 +1531,7 @@ pub const File = struct {
         };
     }
 
-    pub fn fullyQualifiedNameZ(file: File, gpa: *Allocator) ![:0]u8 {
+    pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 {
         var buf = std.ArrayList(u8).init(gpa);
         defer buf.deinit();
         try file.renderFullyQualifiedName(buf.writer());
@@ -1539,7 +1539,7 @@ pub const File = struct {
     }
 
     /// Returns the full path to this file relative to its package.
-    pub fn fullPath(file: File, ally: *Allocator) ![]u8 {
+    pub fn fullPath(file: File, ally: Allocator) ![]u8 {
         return file.pkg.root_src_directory.join(ally, &[_][]const u8{file.sub_file_path});
     }
 
@@ -1594,7 +1594,7 @@ pub const ErrorMsg = struct {
     notes: []ErrorMsg = &.{},
 
     pub fn create(
-        gpa: *Allocator,
+        gpa: Allocator,
         src_loc: SrcLoc,
         comptime format: []const u8,
         args: anytype,
@@ -1607,13 +1607,13 @@ pub const ErrorMsg = struct {
 
     /// Assumes the ErrorMsg struct and msg were both allocated with `gpa`,
     /// as well as all notes.
-    pub fn destroy(err_msg: *ErrorMsg, gpa: *Allocator) void {
+    pub fn destroy(err_msg: *ErrorMsg, gpa: Allocator) void {
         err_msg.deinit(gpa);
         gpa.destroy(err_msg);
     }
 
     pub fn init(
-        gpa: *Allocator,
+        gpa: Allocator,
         src_loc: SrcLoc,
         comptime format: []const u8,
         args: anytype,
@@ -1624,7 +1624,7 @@ pub const ErrorMsg = struct {
         };
     }
 
-    pub fn deinit(err_msg: *ErrorMsg, gpa: *Allocator) void {
+    pub fn deinit(err_msg: *ErrorMsg, gpa: Allocator) void {
         for (err_msg.notes) |*note| {
             note.deinit(gpa);
         }
@@ -1651,7 +1651,7 @@ pub const SrcLoc = struct {
         return @bitCast(Ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node));
     }
 
-    pub fn byteOffset(src_loc: SrcLoc, gpa: *Allocator) !u32 {
+    pub fn byteOffset(src_loc: SrcLoc, gpa: Allocator) !u32 {
         switch (src_loc.lazy) {
             .unneeded => unreachable,
             .entire_file => return 0,
@@ -2066,7 +2066,7 @@ pub const SrcLoc = struct {
 
     pub fn byteOffsetBuiltinCallArg(
         src_loc: SrcLoc,
-        gpa: *Allocator,
+        gpa: Allocator,
         node_off: i32,
         arg_index: u32,
     ) !u32 {
@@ -2464,7 +2464,7 @@ pub fn deinit(mod: *Module) void {
     }
 }
 
-fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
+fn freeExportList(gpa: Allocator, export_list: []*Export) void {
     for (export_list) |exp| {
         gpa.free(exp.options.name);
         if (exp.options.section) |s| gpa.free(s);
@@ -2871,7 +2871,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
 /// * Decl.zir_index
 /// * Fn.zir_body_inst
 /// * Decl.zir_decl_index
-fn updateZirRefs(gpa: *Allocator, file: *File, old_zir: Zir) !void {
+fn updateZirRefs(gpa: Allocator, file: *File, old_zir: Zir) !void {
     const new_zir = file.zir;
 
     // Maps from old ZIR to new ZIR, struct_decl, enum_decl, etc. Any instruction which
@@ -2965,7 +2965,7 @@ fn updateZirRefs(gpa: *Allocator, file: *File, old_zir: Zir) !void {
 }
 
 pub fn mapOldZirToNew(
-    gpa: *Allocator,
+    gpa: Allocator,
     old_zir: Zir,
     new_zir: Zir,
     inst_map: *std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
@@ -4119,7 +4119,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
     mod.gpa.free(kv.value);
 }
 
-pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: *Allocator) SemaError!Air {
+pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) SemaError!Air {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4427,7 +4427,7 @@ pub fn getNextAnonNameIndex(mod: *Module) usize {
     return @atomicRmw(usize, &mod.next_anon_name_index, .Add, 1, .Monotonic);
 }
 
-pub fn makeIntType(arena: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
+pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type {
     const int_payload = try arena.create(Type.Payload.Bits);
     int_payload.* = .{
         .base = .{
@@ -4459,7 +4459,7 @@ pub fn errNoteNonLazy(
 }
 
 pub fn errorUnionType(
-    arena: *Allocator,
+    arena: Allocator,
     error_set: Type,
     payload: Type,
 ) Allocator.Error!Type {
@@ -4511,7 +4511,7 @@ pub const SwitchProngSrc = union(enum) {
     /// the LazySrcLoc in order to emit a compile error.
     pub fn resolve(
         prong_src: SwitchProngSrc,
-        gpa: *Allocator,
+        gpa: Allocator,
         decl: *Decl,
         switch_node_offset: i32,
         range_expand: RangeExpand,
@@ -4605,7 +4605,7 @@ pub const PeerTypeCandidateSrc = union(enum) {
 
     pub fn resolve(
         self: PeerTypeCandidateSrc,
-        gpa: *Allocator,
+        gpa: Allocator,
         decl: *Decl,
         candidate_i: usize,
     ) ?LazySrcLoc {
diff --git a/src/Package.zig b/src/Package.zig
index 976e92d096ff..df894280a99c 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -21,7 +21,7 @@ root_src_directory_owned: bool = false,
 
 /// Allocate a Package. No references to the slices passed are kept.
 pub fn create(
-    gpa: *Allocator,
+    gpa: Allocator,
     /// Null indicates the current working directory
     root_src_dir_path: ?[]const u8,
     /// Relative to root_src_dir_path
@@ -49,7 +49,7 @@ pub fn create(
 }
 
 pub fn createWithDir(
-    gpa: *Allocator,
+    gpa: Allocator,
     directory: Compilation.Directory,
     /// Relative to `directory`. If null, means `directory` is the root src dir
     /// and is owned externally.
@@ -87,7 +87,7 @@ pub fn createWithDir(
 
 /// Free all memory associated with this package. It does not destroy any packages
 /// inside its table; the caller is responsible for calling destroy() on them.
-pub fn destroy(pkg: *Package, gpa: *Allocator) void {
+pub fn destroy(pkg: *Package, gpa: Allocator) void {
     gpa.free(pkg.root_src_path);
 
     if (pkg.root_src_directory_owned) {
@@ -104,7 +104,7 @@ pub fn destroy(pkg: *Package, gpa: *Allocator) void {
 }
 
 /// Only frees memory associated with the table.
-pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
+pub fn deinitTable(pkg: *Package, gpa: Allocator) void {
     var it = pkg.table.keyIterator();
     while (it.next()) |key| {
         gpa.free(key.*);
@@ -113,13 +113,13 @@ pub fn deinitTable(pkg: *Package, gpa: *Allocator) void {
     pkg.table.deinit(gpa);
 }
 
-pub fn add(pkg: *Package, gpa: *Allocator, name: []const u8, package: *Package) !void {
+pub fn add(pkg: *Package, gpa: Allocator, name: []const u8, package: *Package) !void {
     try pkg.table.ensureUnusedCapacity(gpa, 1);
     const name_dupe = try gpa.dupe(u8, name);
     pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
 }
 
-pub fn addAndAdopt(parent: *Package, gpa: *Allocator, name: []const u8, child: *Package) !void {
+pub fn addAndAdopt(parent: *Package, gpa: Allocator, name: []const u8, child: *Package) !void {
     assert(child.parent == null); // make up your mind, who is the parent??
     child.parent = parent;
     return parent.add(gpa, name, child);
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index 2a8a55a07737..e4d65353a98a 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -13,7 +13,7 @@ pub const Range = struct {
     src: SwitchProngSrc,
 };
 
-pub fn init(allocator: *std.mem.Allocator) RangeSet {
+pub fn init(allocator: std.mem.Allocator) RangeSet {
     return .{
         .ranges = std.ArrayList(Range).init(allocator),
     };
diff --git a/src/Sema.zig b/src/Sema.zig
index c88841e0d2f6..ce0c5c8ed75c 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -7,13 +7,13 @@
 
 mod: *Module,
 /// Alias to `mod.gpa`.
-gpa: *Allocator,
+gpa: Allocator,
 /// Points to the temporary arena allocator of the Sema.
 /// This arena will be cleared when the sema is destroyed.
-arena: *Allocator,
+arena: Allocator,
 /// Points to the arena allocator for the owner_decl.
 /// This arena will persist until the decl is invalidated.
-perm_arena: *Allocator,
+perm_arena: Allocator,
 code: Zir,
 air_instructions: std.MultiArrayList(Air.Inst) = .{},
 air_extra: std.ArrayListUnmanaged(u32) = .{},
@@ -417,7 +417,7 @@ pub const Block = struct {
         new_decl_arena: std.heap.ArenaAllocator,
         finished: bool,
 
-        pub fn arena(wad: *WipAnonDecl) *Allocator {
+        pub fn arena(wad: *WipAnonDecl) Allocator {
-            return &wad.new_decl_arena.allocator;
+            return wad.new_decl_arena.getAllocator();
         }
 
@@ -12793,7 +12793,7 @@ const ComptimePtrMutationKit = struct {
     ty: Type,
     decl_arena: std.heap.ArenaAllocator = undefined,
 
-    fn beginArena(self: *ComptimePtrMutationKit, gpa: *Allocator) *Allocator {
+    fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator {
         self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa);
-        return &self.decl_arena.allocator;
+        return self.decl_arena.getAllocator();
     }
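
These two helpers are where the old embedded-field idiom
(`&some_arena.allocator`) had leaked into Sema; with the field gone, the
interface has to come from the arena itself. The arena must live somewhere
stable (here, a field of the surrounding struct), because the returned
`Allocator` stores a pointer to it. A sketch of the `beginArena` shape,
assuming the `getAllocator` naming and the `ArenaAllocator.State.promote` API
used above (`Kit` and `begin` are illustrative names):

    const std = @import("std");

    const Kit = struct {
        decl_arena: std.heap.ArenaAllocator = undefined,

        fn begin(self: *Kit, state: *std.heap.ArenaAllocator.State, gpa: std.mem.Allocator) std.mem.Allocator {
            // promote() rebuilds the arena from its saved state; it is
            // stored in the struct, not in a stack local, so the interface
            // pointer below stays valid after `begin` returns.
            self.decl_arena = state.promote(gpa);
            return self.decl_arena.getAllocator();
        }
    };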
diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig
index a3ba3c6115c5..4f9d8dc01561 100644
--- a/src/ThreadPool.zig
+++ b/src/ThreadPool.zig
@@ -9,7 +9,7 @@ const ThreadPool = @This();
 
 mutex: std.Thread.Mutex = .{},
 is_running: bool = true,
-allocator: *std.mem.Allocator,
+allocator: std.mem.Allocator,
 workers: []Worker,
 run_queue: RunQueue = .{},
 idle_queue: IdleQueue = .{},
@@ -55,7 +55,7 @@ const Worker = struct {
     }
 };
 
-pub fn init(self: *ThreadPool, allocator: *std.mem.Allocator) !void {
+pub fn init(self: *ThreadPool, allocator: std.mem.Allocator) !void {
     self.* = .{
         .allocator = allocator,
         .workers = &[_]Worker{},
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 09ded3745ec8..1fa4813a3498 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -16,14 +16,14 @@ pub const Managed = struct {
     /// If this is `null` then there is no memory management needed.
     arena: ?*std.heap.ArenaAllocator.State = null,
 
-    pub fn deinit(self: *Managed, allocator: *Allocator) void {
+    pub fn deinit(self: *Managed, allocator: Allocator) void {
         if (self.arena) |a| a.promote(allocator).deinit();
         self.* = undefined;
     }
 };
 
 /// Assumes arena allocation. Does a recursive copy.
-pub fn copy(self: TypedValue, arena: *Allocator) error{OutOfMemory}!TypedValue {
+pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
     return TypedValue{
         .ty = try self.ty.copy(arena),
         .val = try self.val.copy(arena),
diff --git a/src/Zir.zig b/src/Zir.zig
index 7e5937e40d44..e32200f78cef 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -101,7 +101,7 @@ pub fn hasCompileErrors(code: Zir) bool {
     return code.extra[@enumToInt(ExtraIndex.compile_errors)] != 0;
 }
 
-pub fn deinit(code: *Zir, gpa: *Allocator) void {
+pub fn deinit(code: *Zir, gpa: Allocator) void {
     code.instructions.deinit(gpa);
     gpa.free(code.string_bytes);
     gpa.free(code.extra);
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 84996410f1f9..1cd9ac95a999 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
     CodegenFail,
 };
 
-gpa: *Allocator,
+gpa: Allocator,
 air: Air,
 liveness: Liveness,
 bin_file: *link.File,
@@ -164,7 +164,7 @@ const MCValue = union(enum) {
 const Branch = struct {
     inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
 
-    fn deinit(self: *Branch, gpa: *Allocator) void {
+    fn deinit(self: *Branch, gpa: Allocator) void {
         self.inst_table.deinit(gpa);
         self.* = undefined;
     }
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 5035b8030409..cb26288d1b77 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -229,7 +229,7 @@ pub const Inst = struct {
     // }
 };
 
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
     mir.instructions.deinit(gpa);
     gpa.free(mir.extra);
     mir.* = undefined;
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index a4624becf833..44df03472f84 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
     CodegenFail,
 };
 
-gpa: *Allocator,
+gpa: Allocator,
 air: Air,
 liveness: Liveness,
 bin_file: *link.File,
@@ -164,7 +164,7 @@ const MCValue = union(enum) {
 const Branch = struct {
     inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
 
-    fn deinit(self: *Branch, gpa: *Allocator) void {
+    fn deinit(self: *Branch, gpa: Allocator) void {
         self.inst_table.deinit(gpa);
         self.* = undefined;
     }
diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig
index 91e2aa941584..5ed7097a8881 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -193,7 +193,7 @@ pub const Inst = struct {
     // }
 };
 
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
     mir.instructions.deinit(gpa);
     gpa.free(mir.extra);
     mir.* = undefined;
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 68789e6e43fe..17ef79b7259e 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
     CodegenFail,
 };
 
-gpa: *Allocator,
+gpa: Allocator,
 air: Air,
 liveness: Liveness,
 bin_file: *link.File,
@@ -158,7 +158,7 @@ const MCValue = union(enum) {
 const Branch = struct {
     inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
 
-    fn deinit(self: *Branch, gpa: *Allocator) void {
+    fn deinit(self: *Branch, gpa: Allocator) void {
         self.inst_table.deinit(gpa);
         self.* = undefined;
     }
diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig
index 23a1da54d5b3..70546e080372 100644
--- a/src/arch/riscv64/Mir.zig
+++ b/src/arch/riscv64/Mir.zig
@@ -101,7 +101,7 @@ pub const Inst = struct {
     // }
 };
 
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
     mir.instructions.deinit(gpa);
     gpa.free(mir.extra);
     mir.* = undefined;
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 55b0aff81ff9..3f75056e5995 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -508,7 +508,7 @@ const Self = @This();
 decl: *Decl,
 air: Air,
 liveness: Liveness,
-gpa: *mem.Allocator,
+gpa: mem.Allocator,
 /// Table to save `WValue`'s generated by an `Air.Inst`
 values: ValueTable,
 /// Mapping from Air.Inst.Index to block ids
@@ -983,7 +983,7 @@ const CallWValues = struct {
     args: []WValue,
     return_value: WValue,
 
-    fn deinit(self: *CallWValues, gpa: *Allocator) void {
+    fn deinit(self: *CallWValues, gpa: Allocator) void {
         gpa.free(self.args);
         self.* = undefined;
     }
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 5891893f3377..c14a40ecce02 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -411,7 +411,7 @@ pub const Inst = struct {
     };
 };
 
-pub fn deinit(self: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(self: *Mir, gpa: std.mem.Allocator) void {
     self.instructions.deinit(gpa);
     gpa.free(self.extra);
     self.* = undefined;
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 55e51dcc005e..4580395fd892 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -33,7 +33,7 @@ const InnerError = error{
     CodegenFail,
 };
 
-gpa: *Allocator,
+gpa: Allocator,
 air: Air,
 liveness: Liveness,
 bin_file: *link.File,
@@ -174,7 +174,7 @@ pub const MCValue = union(enum) {
 const Branch = struct {
     inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
 
-    fn deinit(self: *Branch, gpa: *Allocator) void {
+    fn deinit(self: *Branch, gpa: Allocator) void {
         self.inst_table.deinit(gpa);
         self.* = undefined;
     }
diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig
index 690b1ce5f2a4..501a5428d200 100644
--- a/src/arch/x86_64/Mir.zig
+++ b/src/arch/x86_64/Mir.zig
@@ -347,7 +347,7 @@ pub const ArgDbgInfo = struct {
     arg_index: u32,
 };
 
-pub fn deinit(mir: *Mir, gpa: *std.mem.Allocator) void {
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
     mir.instructions.deinit(gpa);
     gpa.free(mir.extra);
     mir.* = undefined;
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 11c899bdd292..43776dea6723 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -163,14 +163,14 @@ pub const Object = struct {
 
 /// This data is available both when outputting .c code and when outputting an .h file.
 pub const DeclGen = struct {
-    gpa: *std.mem.Allocator,
+    gpa: std.mem.Allocator,
     module: *Module,
     decl: *Decl,
     fwd_decl: std.ArrayList(u8),
     error_msg: ?*Module.ErrorMsg,
     /// The key of this map is Type which has references to typedefs_arena.
     typedefs: TypedefMap,
-    typedefs_arena: *std.mem.Allocator,
+    typedefs_arena: std.mem.Allocator,
 
     fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
         @setCold(true);
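
With the interface passed by value, `DeclGen` embeds its `Allocator` handles directly instead of pointers to them. The new `Allocator` is a small value (a context pointer plus a vtable pointer, in the layout this series introduces), so holding it in a field is cheap and drops one indirection per allocation. A std-only sketch of the pattern; `Ctx` and `dupeName` are illustrative, not part of the patch:

    const std = @import("std");

    const Ctx = struct {
        gpa: std.mem.Allocator, // held by value, like DeclGen.gpa above

        fn dupeName(self: *Ctx, name: []const u8) ![]u8 {
            return self.gpa.dupe(u8, name);
        }
    };

    test "embedding the allocator by value" {
        var ctx = Ctx{ .gpa = std.testing.allocator };
        const name = try ctx.dupeName("decl");
        defer std.testing.allocator.free(name);
        try std.testing.expectEqualStrings("decl", name);
    }
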
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 21fa0187e3a0..e326b2a677c3 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -23,7 +23,7 @@ const LazySrcLoc = Module.LazySrcLoc;
 
 const Error = error{ OutOfMemory, CodegenFail };
 
-pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
+pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
     const llvm_arch = switch (target.cpu.arch) {
         .arm => "arm",
         .armeb => "armeb",
@@ -190,14 +190,14 @@ pub const Object = struct {
         std.hash_map.default_max_load_percentage,
     );
 
-    pub fn create(gpa: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
+    pub fn create(gpa: Allocator, sub_path: []const u8, options: link.Options) !*Object {
         const obj = try gpa.create(Object);
         errdefer gpa.destroy(obj);
         obj.* = try Object.init(gpa, sub_path, options);
         return obj;
     }
 
-    pub fn init(gpa: *Allocator, sub_path: []const u8, options: link.Options) !Object {
+    pub fn init(gpa: Allocator, sub_path: []const u8, options: link.Options) !Object {
         const context = llvm.Context.create();
         errdefer context.dispose();
 
@@ -287,7 +287,7 @@ pub const Object = struct {
         };
     }
 
-    pub fn deinit(self: *Object, gpa: *Allocator) void {
+    pub fn deinit(self: *Object, gpa: Allocator) void {
         self.target_machine.dispose();
         self.llvm_module.dispose();
         self.context.dispose();
@@ -297,13 +297,13 @@ pub const Object = struct {
         self.* = undefined;
     }
 
-    pub fn destroy(self: *Object, gpa: *Allocator) void {
+    pub fn destroy(self: *Object, gpa: Allocator) void {
         self.deinit(gpa);
         gpa.destroy(self);
     }
 
     fn locPath(
-        arena: *Allocator,
+        arena: Allocator,
         opt_loc: ?Compilation.EmitLoc,
         cache_directory: Compilation.Directory,
     ) !?[*:0]u8 {
@@ -554,7 +554,7 @@ pub const DeclGen = struct {
     object: *Object,
     module: *Module,
     decl: *Module.Decl,
-    gpa: *Allocator,
+    gpa: Allocator,
     err_msg: ?*Module.ErrorMsg,
 
     fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@@ -1621,7 +1621,7 @@ pub const DeclGen = struct {
 };
 
 pub const FuncGen = struct {
-    gpa: *Allocator,
+    gpa: Allocator,
     dg: *DeclGen,
     air: Air,
     liveness: Liveness,
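
`Object.create`/`destroy` above keep the usual heap idiom, only with a by-value allocator. A minimal sketch of that create/errdefer/destroy pairing; the `Thing` type is illustrative:

    const std = @import("std");

    const Thing = struct { refs: u32 = 0 };

    fn createThing(gpa: std.mem.Allocator) !*Thing {
        const thing = try gpa.create(Thing);
        errdefer gpa.destroy(thing); // runs only if a later init step fails
        thing.* = .{};
        return thing;
    }

    test "create/destroy with the same by-value allocator" {
        const thing = try createThing(std.testing.allocator);
        defer std.testing.allocator.destroy(thing);
    }
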
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 67faf32471d7..39363064a7e5 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -70,7 +70,7 @@ pub fn writeInstructionWithString(code: *std.ArrayList(Word), opcode: Opcode, ar
 /// of data which needs to be persistent over different calls to Decl code generation.
 pub const SPIRVModule = struct {
     /// A general-purpose allocator which may be used to allocate temporary resources required for compilation.
-    gpa: *Allocator,
+    gpa: Allocator,
 
     /// The parent module.
     module: *Module,
@@ -103,7 +103,7 @@ pub const SPIRVModule = struct {
     /// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
     file_names: std.StringHashMap(ResultId),
 
-    pub fn init(gpa: *Allocator, module: *Module) SPIRVModule {
+    pub fn init(gpa: Allocator, module: *Module) SPIRVModule {
         return .{
             .gpa = gpa,
             .module = module,
diff --git a/src/glibc.zig b/src/glibc.zig
index fd143bac7e1d..e6e67e4f49f0 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -34,7 +34,7 @@ pub const ABI = struct {
     version_table: std.AutoHashMapUnmanaged(target_util.ArchOsAbi, [*]VerList),
     arena_state: std.heap.ArenaAllocator.State,
 
-    pub fn destroy(abi: *ABI, gpa: *Allocator) void {
+    pub fn destroy(abi: *ABI, gpa: Allocator) void {
         abi.version_table.deinit(gpa);
         abi.arena_state.promote(gpa).deinit(); // Frees the ABI memory too.
     }
@@ -59,7 +59,7 @@ pub const LoadMetaDataError = error{
 
 /// This function will emit a log error when there is a problem with the zig installation and then return
 /// `error.ZigInstallationCorrupt`.
-pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*ABI {
+pub fn loadMetaData(gpa: Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*ABI {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -433,7 +433,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
     }
 }
 
-fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
+fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
     const arch = comp.getTarget().cpu.arch;
     const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
     const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
@@ -493,7 +493,7 @@ fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) !
     return result.items;
 }
 
-fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
+fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void {
     const target = comp.getTarget();
     const arch = target.cpu.arch;
     const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl";
@@ -566,7 +566,7 @@ fn add_include_dirs(comp: *Compilation, arena: *Allocator, args: *std.ArrayList(
 }
 
 fn add_include_dirs_arch(
-    arena: *Allocator,
+    arena: Allocator,
     args: *std.ArrayList([]const u8),
     arch: std.Target.Cpu.Arch,
     opt_nptl: ?[]const u8,
@@ -677,14 +677,14 @@ fn add_include_dirs_arch(
     }
 }
 
-fn path_from_lib(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
+fn path_from_lib(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
     return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
 }
 
 const lib_libc = "libc" ++ path.sep_str;
 const lib_libc_glibc = lib_libc ++ "glibc" ++ path.sep_str;
 
-fn lib_path(comp: *Compilation, arena: *Allocator, sub_path: []const u8) ![]const u8 {
+fn lib_path(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
     return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
 }
 
@@ -692,7 +692,7 @@ pub const BuiltSharedObjects = struct {
     lock: Cache.Lock,
     dir_path: []u8,
 
-    pub fn deinit(self: *BuiltSharedObjects, gpa: *Allocator) void {
+    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
         self.lock.release();
         gpa.free(self.dir_path);
         self.* = undefined;
@@ -915,7 +915,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
 
 fn buildSharedLib(
     comp: *Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     zig_cache_directory: Compilation.Directory,
     bin_directory: Compilation.Directory,
     asm_file_basename: []const u8,
diff --git a/src/introspect.zig b/src/introspect.zig
index be974d3efebb..562d6b04f40a 100644
--- a/src/introspect.zig
+++ b/src/introspect.zig
@@ -33,7 +33,7 @@ fn testZigInstallPrefix(base_dir: fs.Dir) ?Compilation.Directory {
 }
 
 /// Both the directory handle and the path are newly allocated resources which the caller now owns.
-pub fn findZigLibDir(gpa: *mem.Allocator) !Compilation.Directory {
+pub fn findZigLibDir(gpa: mem.Allocator) !Compilation.Directory {
     const self_exe_path = try fs.selfExePathAlloc(gpa);
     defer gpa.free(self_exe_path);
 
@@ -42,7 +42,7 @@ pub fn findZigLibDir(gpa: *mem.Allocator) !Compilation.Directory {
 
 /// Both the directory handle and the path are newly allocated resources which the caller now owns.
 pub fn findZigLibDirFromSelfExe(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     self_exe_path: []const u8,
 ) error{ OutOfMemory, FileNotFound }!Compilation.Directory {
     const cwd = fs.cwd();
@@ -61,7 +61,7 @@ pub fn findZigLibDirFromSelfExe(
 }
 
 /// Caller owns returned memory.
-pub fn resolveGlobalCacheDir(allocator: *mem.Allocator) ![]u8 {
+pub fn resolveGlobalCacheDir(allocator: mem.Allocator) ![]u8 {
     if (std.process.getEnvVarOwned(allocator, "ZIG_GLOBAL_CACHE_DIR")) |value| {
         if (value.len > 0) {
             return value;
diff --git a/src/libc_installation.zig b/src/libc_installation.zig
index be0abe301b7c..4cd43c7567e7 100644
--- a/src/libc_installation.zig
+++ b/src/libc_installation.zig
@@ -39,7 +39,7 @@ pub const LibCInstallation = struct {
     };
 
     pub fn parse(
-        allocator: *Allocator,
+        allocator: Allocator,
         libc_file: []const u8,
         target: std.zig.CrossTarget,
     ) !LibCInstallation {
@@ -175,7 +175,7 @@ pub const LibCInstallation = struct {
     }
 
     pub const FindNativeOptions = struct {
-        allocator: *Allocator,
+        allocator: Allocator,
 
         /// If enabled, will print human-friendly errors to stderr.
         verbose: bool = false,
@@ -234,7 +234,7 @@ pub const LibCInstallation = struct {
     }
 
     /// Must be the same allocator passed to `parse` or `findNative`.
-    pub fn deinit(self: *LibCInstallation, allocator: *Allocator) void {
+    pub fn deinit(self: *LibCInstallation, allocator: Allocator) void {
         const fields = std.meta.fields(LibCInstallation);
         inline for (fields) |field| {
             if (@field(self, field.name)) |payload| {
@@ -562,7 +562,7 @@ pub const LibCInstallation = struct {
 };
 
 pub const CCPrintFileNameOptions = struct {
-    allocator: *Allocator,
+    allocator: Allocator,
     search_basename: []const u8,
     want_dirname: enum { full_path, only_dir },
     verbose: bool = false,
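
The contract on `deinit` above — it must receive the same allocator passed to `parse` or `findNative` — is the general rule throughout this patch: memory must be freed through an `Allocator` equal to the one that allocated it. A std-only illustration:

    const std = @import("std");

    test "free with the allocator that allocated" {
        const gpa = std.testing.allocator;
        var list = std.ArrayListUnmanaged(u8){};
        defer list.deinit(gpa); // must equal the allocator used to append below
        try list.appendSlice(gpa, "libc.txt");
        try std.testing.expectEqual(@as(usize, 8), list.items.len);
    }
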
diff --git a/src/link.zig b/src/link.zig
index 77464737b083..b57be64d4279 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -165,7 +165,7 @@ pub const File = struct {
     tag: Tag,
     options: Options,
     file: ?fs.File,
-    allocator: *Allocator,
+    allocator: Allocator,
     /// When linking with LLD, this linker code will output an object file only at
     /// this location, and then this path can be placed on the LLD linker line.
     intermediary_basename: ?[]const u8 = null,
@@ -221,7 +221,7 @@ pub const File = struct {
     /// incremental linking fails, falls back to truncating the file and
     /// rewriting it. A malicious file is detected as incremental link failure
     /// and does not cause Illegal Behavior. This operation is not atomic.
-    pub fn openPath(allocator: *Allocator, options: Options) !*File {
+    pub fn openPath(allocator: Allocator, options: Options) !*File {
         if (options.object_format == .macho) {
             return &(try MachO.openPath(allocator, options)).base;
         }
diff --git a/src/link/C.zig b/src/link/C.zig
index 5ca389ae624e..cbd36ebab504 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -36,7 +36,7 @@ const DeclBlock = struct {
     /// Any arena memory the Type points to lives in the `arena` field of `C`.
     typedefs: codegen.TypedefMap.Unmanaged = .{},
 
-    fn deinit(db: *DeclBlock, gpa: *Allocator) void {
+    fn deinit(db: *DeclBlock, gpa: Allocator) void {
         db.code.deinit(gpa);
         db.fwd_decl.deinit(gpa);
         for (db.typedefs.values()) |typedef| {
@@ -47,7 +47,7 @@ const DeclBlock = struct {
     }
 };
 
-pub fn openPath(gpa: *Allocator, sub_path: []const u8, options: link.Options) !*C {
+pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C {
     assert(options.object_format == .c);
 
     if (options.use_llvm) return error.LLVMHasNoCBackend;
@@ -336,7 +336,7 @@ const Flush = struct {
         std.hash_map.default_max_load_percentage,
     );
 
-    fn deinit(f: *Flush, gpa: *Allocator) void {
+    fn deinit(f: *Flush, gpa: Allocator) void {
         f.all_buffers.deinit(gpa);
         f.err_typedef_buf.deinit(gpa);
         f.typedefs.deinit(gpa);
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index c5ed618895b0..d5e3e6caa32f 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -125,7 +125,7 @@ pub const TextBlock = struct {
 
 pub const SrcFn = void;
 
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Coff {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Coff {
     assert(options.object_format == .coff);
 
     if (build_options.have_llvm and options.use_llvm) {
@@ -396,7 +396,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
     return self;
 }
 
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Coff {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff {
     const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
@@ -1394,7 +1394,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
     }
 }
 
-fn findLib(self: *Coff, arena: *Allocator, name: []const u8) !?[]const u8 {
+fn findLib(self: *Coff, arena: Allocator, name: []const u8) !?[]const u8 {
     for (self.base.options.lib_dirs) |lib_dir| {
         const full_path = try fs.path.join(arena, &.{ lib_dir, name });
         fs.cwd().access(full_path, .{}) catch |err| switch (err) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index fd615bc1091e..6670f1a8b667 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -228,7 +228,7 @@ pub const SrcFn = struct {
     };
 };
 
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Elf {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Elf {
     assert(options.object_format == .elf);
 
     if (build_options.have_llvm and options.use_llvm) {
@@ -281,7 +281,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
     return self;
 }
 
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Elf {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
     const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) {
         0...32 => .p32,
         33...64 => .p64,
@@ -2205,7 +2205,7 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
     }
 }
 
-fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void {
+fn deinitRelocs(gpa: Allocator, table: *File.DbgInfoTypeRelocsTable) void {
     var it = table.valueIterator();
     while (it.next()) |value| {
         value.relocs.deinit(gpa);
@@ -3360,7 +3360,7 @@ const CsuObjects = struct {
     crtend: ?[]const u8 = null,
     crtn: ?[]const u8 = null,
 
-    fn init(arena: *mem.Allocator, link_options: link.Options, comp: *const Compilation) !CsuObjects {
+    fn init(arena: mem.Allocator, link_options: link.Options, comp: *const Compilation) !CsuObjects {
         // crt objects are only required for libc.
         if (!link_options.link_libc) return CsuObjects{};
 
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 56f6483cd0f9..bd26b64ad281 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -280,7 +280,7 @@ pub const SrcFn = struct {
     };
 };
 
-pub fn openPath(allocator: *Allocator, options: link.Options) !*MachO {
+pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
     assert(options.object_format == .macho);
 
     const use_stage1 = build_options.is_stage1 and options.use_stage1;
@@ -366,7 +366,7 @@ pub fn openPath(allocator: *Allocator, options: link.Options) !*MachO {
     return self;
 }
 
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*MachO {
     const self = try gpa.create(MachO);
     const cpu_arch = options.target.cpu.arch;
     const os_tag = options.target.os.tag;
@@ -1032,7 +1032,7 @@ pub fn flushObject(self: *MachO, comp: *Compilation) !void {
 }
 
 fn resolveSearchDir(
-    arena: *Allocator,
+    arena: Allocator,
     dir: []const u8,
     syslibroot: ?[]const u8,
 ) !?[]const u8 {
@@ -1074,7 +1074,7 @@ fn resolveSearchDir(
 }
 
 fn resolveLib(
-    arena: *Allocator,
+    arena: Allocator,
     search_dirs: []const []const u8,
     name: []const u8,
     ext: []const u8,
@@ -1098,7 +1098,7 @@ fn resolveLib(
 }
 
 fn resolveFramework(
-    arena: *Allocator,
+    arena: Allocator,
     search_dirs: []const []const u8,
     name: []const u8,
     ext: []const u8,
diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig
index 0d17f305b912..1ebb1224237c 100644
--- a/src/link/MachO/Archive.zig
+++ b/src/link/MachO/Archive.zig
@@ -92,7 +92,7 @@ const ar_hdr = extern struct {
     }
 };
 
-pub fn deinit(self: *Archive, allocator: *Allocator) void {
+pub fn deinit(self: *Archive, allocator: Allocator) void {
     for (self.toc.keys()) |*key| {
         allocator.free(key.*);
     }
@@ -103,7 +103,7 @@ pub fn deinit(self: *Archive, allocator: *Allocator) void {
     allocator.free(self.name);
 }
 
-pub fn parse(self: *Archive, allocator: *Allocator, target: std.Target) !void {
+pub fn parse(self: *Archive, allocator: Allocator, target: std.Target) !void {
     const reader = self.file.reader();
     self.library_offset = try fat.getLibraryOffset(reader, target);
     try self.file.seekTo(self.library_offset);
@@ -128,7 +128,7 @@ pub fn parse(self: *Archive, allocator: *Allocator, target: std.Target) !void {
     try reader.context.seekTo(0);
 }
 
-fn parseName(allocator: *Allocator, header: ar_hdr, reader: anytype) ![]u8 {
+fn parseName(allocator: Allocator, header: ar_hdr, reader: anytype) ![]u8 {
     const name_or_length = try header.nameOrLength();
     var name: []u8 = undefined;
     switch (name_or_length) {
@@ -146,7 +146,7 @@ fn parseName(allocator: *Allocator, header: ar_hdr, reader: anytype) ![]u8 {
     return name;
 }
 
-fn parseTableOfContents(self: *Archive, allocator: *Allocator, reader: anytype) !void {
+fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !void {
     const symtab_size = try reader.readIntLittle(u32);
     var symtab = try allocator.alloc(u8, symtab_size);
     defer allocator.free(symtab);
@@ -188,7 +188,7 @@ fn parseTableOfContents(self: *Archive, allocator: *Allocator, reader: anytype)
     }
 }
 
-pub fn parseObject(self: Archive, allocator: *Allocator, target: std.Target, offset: u32) !Object {
+pub fn parseObject(self: Archive, allocator: Allocator, target: std.Target, offset: u32) !Object {
     const reader = self.file.reader();
     try reader.context.seekTo(offset + self.library_offset);
 
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index f5148ac8ee46..87c1e925590f 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -195,7 +195,7 @@ pub const empty = Atom{
     .dbg_info_len = undefined,
 };
 
-pub fn deinit(self: *Atom, allocator: *Allocator) void {
+pub fn deinit(self: *Atom, allocator: Allocator) void {
     self.dices.deinit(allocator);
     self.lazy_bindings.deinit(allocator);
     self.bindings.deinit(allocator);
@@ -246,7 +246,7 @@ pub fn freeListEligible(self: Atom, macho_file: MachO) bool {
 
 const RelocContext = struct {
     base_addr: u64 = 0,
-    allocator: *Allocator,
+    allocator: Allocator,
     object: *Object,
     macho_file: *MachO,
 };
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 18c85eb8f742..41c7c077412e 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -58,7 +58,7 @@ cdir: ?CodeDirectory = null,
 
 pub fn calcAdhocSignature(
     self: *CodeSignature,
-    allocator: *Allocator,
+    allocator: Allocator,
     file: fs.File,
     id: []const u8,
     text_segment: macho.segment_command_64,
@@ -145,7 +145,7 @@ pub fn write(self: CodeSignature, writer: anytype) !void {
     try self.cdir.?.write(writer);
 }
 
-pub fn deinit(self: *CodeSignature, allocator: *Allocator) void {
+pub fn deinit(self: *CodeSignature, allocator: Allocator) void {
     if (self.cdir) |*cdir| {
         cdir.data.deinit(allocator);
     }
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 1aa63f60d0de..9724a1ae033f 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -104,7 +104,7 @@ const min_nop_size = 2;
 
 /// You must call this function *after* `MachO.populateMissingMetadata()`
 /// has been called to get a viable debug symbols output.
-pub fn populateMissingMetadata(self: *DebugSymbols, allocator: *Allocator) !void {
+pub fn populateMissingMetadata(self: *DebugSymbols, allocator: Allocator) !void {
     if (self.uuid_cmd_index == null) {
         const base_cmd = self.base.load_commands.items[self.base.uuid_cmd_index.?];
         self.uuid_cmd_index = @intCast(u16, self.load_commands.items.len);
@@ -268,7 +268,7 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
     return index;
 }
 
-pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Options) !void {
+pub fn flushModule(self: *DebugSymbols, allocator: Allocator, options: link.Options) !void {
     // TODO This linker code currently assumes there is only 1 compilation unit and it corresponds to the
     // Zig source code.
     const module = options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
@@ -577,7 +577,7 @@ pub fn flushModule(self: *DebugSymbols, allocator: *Allocator, options: link.Opt
     assert(!self.debug_string_table_dirty);
 }
 
-pub fn deinit(self: *DebugSymbols, allocator: *Allocator) void {
+pub fn deinit(self: *DebugSymbols, allocator: Allocator) void {
     self.dbg_info_decl_free_list.deinit(allocator);
     self.dbg_line_fn_free_list.deinit(allocator);
     self.debug_string_table.deinit(allocator);
@@ -588,7 +588,7 @@ pub fn deinit(self: *DebugSymbols, allocator: *Allocator) void {
     self.file.close();
 }
 
-fn copySegmentCommand(self: *DebugSymbols, allocator: *Allocator, base_cmd: SegmentCommand) !SegmentCommand {
+fn copySegmentCommand(self: *DebugSymbols, allocator: Allocator, base_cmd: SegmentCommand) !SegmentCommand {
     var cmd = SegmentCommand{
         .inner = .{
             .segname = undefined,
@@ -648,7 +648,7 @@ fn updateDwarfSegment(self: *DebugSymbols) void {
 }
 
 /// Writes all load commands and section headers.
-fn writeLoadCommands(self: *DebugSymbols, allocator: *Allocator) !void {
+fn writeLoadCommands(self: *DebugSymbols, allocator: Allocator) !void {
     if (!self.load_commands_dirty) return;
 
     var sizeofcmds: u32 = 0;
@@ -834,7 +834,7 @@ pub const DeclDebugBuffers = struct {
 /// Caller owns the returned memory.
 pub fn initDeclDebugBuffers(
     self: *DebugSymbols,
-    allocator: *Allocator,
+    allocator: Allocator,
     module: *Module,
     decl: *Module.Decl,
 ) !DeclDebugBuffers {
@@ -930,7 +930,7 @@ pub fn initDeclDebugBuffers(
 
 pub fn commitDeclDebugInfo(
     self: *DebugSymbols,
-    allocator: *Allocator,
+    allocator: Allocator,
     module: *Module,
     decl: *Module.Decl,
     debug_buffers: *DeclDebugBuffers,
@@ -1141,7 +1141,7 @@ fn addDbgInfoType(
 
 fn updateDeclDebugInfoAllocation(
     self: *DebugSymbols,
-    allocator: *Allocator,
+    allocator: Allocator,
     text_block: *TextBlock,
     len: u32,
 ) !void {
@@ -1256,7 +1256,7 @@ fn getDebugLineProgramEnd(self: DebugSymbols) u32 {
 }
 
 /// TODO Improve this to use a table.
-fn makeDebugString(self: *DebugSymbols, allocator: *Allocator, bytes: []const u8) !u32 {
+fn makeDebugString(self: *DebugSymbols, allocator: Allocator, bytes: []const u8) !u32 {
     try self.debug_string_table.ensureUnusedCapacity(allocator, bytes.len + 1);
     const result = self.debug_string_table.items.len;
     self.debug_string_table.appendSliceAssumeCapacity(bytes);
diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig
index 2835e57f0cd7..5f7bd9b7639a 100644
--- a/src/link/MachO/Dylib.zig
+++ b/src/link/MachO/Dylib.zig
@@ -44,7 +44,7 @@ pub const Id = struct {
     current_version: u32,
     compatibility_version: u32,
 
-    pub fn default(allocator: *Allocator, name: []const u8) !Id {
+    pub fn default(allocator: Allocator, name: []const u8) !Id {
         return Id{
             .name = try allocator.dupe(u8, name),
             .timestamp = 2,
@@ -53,7 +53,7 @@ pub const Id = struct {
         };
     }
 
-    pub fn fromLoadCommand(allocator: *Allocator, lc: commands.GenericCommandWithData(macho.dylib_command)) !Id {
+    pub fn fromLoadCommand(allocator: Allocator, lc: commands.GenericCommandWithData(macho.dylib_command)) !Id {
         const dylib = lc.inner.dylib;
         const dylib_name = @ptrCast([*:0]const u8, lc.data[dylib.name - @sizeOf(macho.dylib_command) ..]);
         const name = try allocator.dupe(u8, mem.sliceTo(dylib_name, 0));
@@ -66,7 +66,7 @@ pub const Id = struct {
         };
     }
 
-    pub fn deinit(id: *Id, allocator: *Allocator) void {
+    pub fn deinit(id: *Id, allocator: Allocator) void {
         allocator.free(id.name);
     }
 
@@ -125,7 +125,7 @@ pub const Id = struct {
     }
 };
 
-pub fn deinit(self: *Dylib, allocator: *Allocator) void {
+pub fn deinit(self: *Dylib, allocator: Allocator) void {
     for (self.load_commands.items) |*lc| {
         lc.deinit(allocator);
     }
@@ -143,7 +143,7 @@ pub fn deinit(self: *Dylib, allocator: *Allocator) void {
     }
 }
 
-pub fn parse(self: *Dylib, allocator: *Allocator, target: std.Target, dependent_libs: anytype) !void {
+pub fn parse(self: *Dylib, allocator: Allocator, target: std.Target, dependent_libs: anytype) !void {
     log.debug("parsing shared library '{s}'", .{self.name});
 
     self.library_offset = try fat.getLibraryOffset(self.file.reader(), target);
@@ -170,7 +170,7 @@ pub fn parse(self: *Dylib, allocator: *Allocator, target: std.Target, dependent_
     try self.parseSymbols(allocator);
 }
 
-fn readLoadCommands(self: *Dylib, allocator: *Allocator, reader: anytype, dependent_libs: anytype) !void {
+fn readLoadCommands(self: *Dylib, allocator: Allocator, reader: anytype, dependent_libs: anytype) !void {
     const should_lookup_reexports = self.header.?.flags & macho.MH_NO_REEXPORTED_DYLIBS == 0;
 
     try self.load_commands.ensureUnusedCapacity(allocator, self.header.?.ncmds);
@@ -203,7 +203,7 @@ fn readLoadCommands(self: *Dylib, allocator: *Allocator, reader: anytype, depend
     }
 }
 
-fn parseId(self: *Dylib, allocator: *Allocator) !void {
+fn parseId(self: *Dylib, allocator: Allocator) !void {
     const index = self.id_cmd_index orelse {
         log.debug("no LC_ID_DYLIB load command found; using hard-coded defaults...", .{});
         self.id = try Id.default(allocator, self.name);
@@ -212,7 +212,7 @@ fn parseId(self: *Dylib, allocator: *Allocator) !void {
     self.id = try Id.fromLoadCommand(allocator, self.load_commands.items[index].Dylib);
 }
 
-fn parseSymbols(self: *Dylib, allocator: *Allocator) !void {
+fn parseSymbols(self: *Dylib, allocator: Allocator) !void {
     const index = self.symtab_cmd_index orelse return;
     const symtab_cmd = self.load_commands.items[index].Symtab;
 
@@ -236,7 +236,7 @@ fn parseSymbols(self: *Dylib, allocator: *Allocator) !void {
     }
 }
 
-fn addObjCClassSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addObjCClassSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
     const expanded = &[_][]const u8{
         try std.fmt.allocPrint(allocator, "_OBJC_CLASS_$_{s}", .{sym_name}),
         try std.fmt.allocPrint(allocator, "_OBJC_METACLASS_$_{s}", .{sym_name}),
@@ -248,29 +248,29 @@ fn addObjCClassSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8)
     }
 }
 
-fn addObjCIVarSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addObjCIVarSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
     const expanded = try std.fmt.allocPrint(allocator, "_OBJC_IVAR_$_{s}", .{sym_name});
     if (self.symbols.contains(expanded)) return;
     try self.symbols.putNoClobber(allocator, expanded, .{});
 }
 
-fn addObjCEhTypeSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addObjCEhTypeSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
     const expanded = try std.fmt.allocPrint(allocator, "_OBJC_EHTYPE_$_{s}", .{sym_name});
     if (self.symbols.contains(expanded)) return;
     try self.symbols.putNoClobber(allocator, expanded, .{});
 }
 
-fn addSymbol(self: *Dylib, allocator: *Allocator, sym_name: []const u8) !void {
+fn addSymbol(self: *Dylib, allocator: Allocator, sym_name: []const u8) !void {
     if (self.symbols.contains(sym_name)) return;
     try self.symbols.putNoClobber(allocator, try allocator.dupe(u8, sym_name), {});
 }
 
 const TargetMatcher = struct {
-    allocator: *Allocator,
+    allocator: Allocator,
     target: std.Target,
     target_strings: std.ArrayListUnmanaged([]const u8) = .{},
 
-    fn init(allocator: *Allocator, target: std.Target) !TargetMatcher {
+    fn init(allocator: Allocator, target: std.Target) !TargetMatcher {
         var self = TargetMatcher{
             .allocator = allocator,
             .target = target,
@@ -297,7 +297,7 @@ const TargetMatcher = struct {
         self.target_strings.deinit(self.allocator);
     }
 
-    fn targetToAppleString(allocator: *Allocator, target: std.Target) ![]const u8 {
+    fn targetToAppleString(allocator: Allocator, target: std.Target) ![]const u8 {
         const arch = switch (target.cpu.arch) {
             .aarch64 => "arm64",
             .x86_64 => "x86_64",
@@ -336,7 +336,7 @@ const TargetMatcher = struct {
 
 pub fn parseFromStub(
     self: *Dylib,
-    allocator: *Allocator,
+    allocator: Allocator,
     target: std.Target,
     lib_stub: LibStub,
     dependent_libs: anytype,
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index a7db8566004d..d7cc7c54053e 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -74,7 +74,7 @@ const DebugInfo = struct {
     debug_line: []u8,
     debug_ranges: []u8,
 
-    pub fn parseFromObject(allocator: *Allocator, object: *const Object) !?DebugInfo {
+    pub fn parseFromObject(allocator: Allocator, object: *const Object) !?DebugInfo {
         var debug_info = blk: {
             const index = object.dwarf_debug_info_index orelse return null;
             break :blk try object.readSection(allocator, index);
@@ -118,7 +118,7 @@ const DebugInfo = struct {
         };
     }
 
-    pub fn deinit(self: *DebugInfo, allocator: *Allocator) void {
+    pub fn deinit(self: *DebugInfo, allocator: Allocator) void {
         allocator.free(self.debug_info);
         allocator.free(self.debug_abbrev);
         allocator.free(self.debug_str);
@@ -130,7 +130,7 @@ const DebugInfo = struct {
     }
 };
 
-pub fn deinit(self: *Object, allocator: *Allocator) void {
+pub fn deinit(self: *Object, allocator: Allocator) void {
     for (self.load_commands.items) |*lc| {
         lc.deinit(allocator);
     }
@@ -160,7 +160,7 @@ pub fn deinit(self: *Object, allocator: *Allocator) void {
     }
 }
 
-pub fn free(self: *Object, allocator: *Allocator, macho_file: *MachO) void {
+pub fn free(self: *Object, allocator: Allocator, macho_file: *MachO) void {
     log.debug("freeObject {*}", .{self});
 
     var it = self.end_atoms.iterator();
@@ -227,7 +227,7 @@ fn freeAtoms(self: *Object, macho_file: *MachO) void {
     }
 }
 
-pub fn parse(self: *Object, allocator: *Allocator, target: std.Target) !void {
+pub fn parse(self: *Object, allocator: Allocator, target: std.Target) !void {
     const reader = self.file.reader();
     if (self.file_offset) |offset| {
         try reader.context.seekTo(offset);
@@ -263,7 +263,7 @@ pub fn parse(self: *Object, allocator: *Allocator, target: std.Target) !void {
     try self.parseDebugInfo(allocator);
 }
 
-pub fn readLoadCommands(self: *Object, allocator: *Allocator, reader: anytype) !void {
+pub fn readLoadCommands(self: *Object, allocator: Allocator, reader: anytype) !void {
     const header = self.header orelse unreachable; // Unreachable here signifies a fatal unexplored condition.
     const offset = self.file_offset orelse 0;
 
@@ -381,7 +381,7 @@ fn filterDice(dices: []macho.data_in_code_entry, start_addr: u64, end_addr: u64)
     return dices[start..end];
 }
 
-pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO) !void {
+pub fn parseIntoAtoms(self: *Object, allocator: Allocator, macho_file: *MachO) !void {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -555,7 +555,7 @@ pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO)
     }
 }
 
-fn parseSymtab(self: *Object, allocator: *Allocator) !void {
+fn parseSymtab(self: *Object, allocator: Allocator) !void {
     const index = self.symtab_cmd_index orelse return;
     const symtab_cmd = self.load_commands.items[index].Symtab;
 
@@ -571,7 +571,7 @@ fn parseSymtab(self: *Object, allocator: *Allocator) !void {
     try self.strtab.appendSlice(allocator, strtab);
 }
 
-pub fn parseDebugInfo(self: *Object, allocator: *Allocator) !void {
+pub fn parseDebugInfo(self: *Object, allocator: Allocator) !void {
     log.debug("parsing debug info in '{s}'", .{self.name});
 
     var debug_info = blk: {
@@ -603,7 +603,7 @@ pub fn parseDebugInfo(self: *Object, allocator: *Allocator) !void {
     }
 }
 
-pub fn parseDataInCode(self: *Object, allocator: *Allocator) !void {
+pub fn parseDataInCode(self: *Object, allocator: Allocator) !void {
     const index = self.data_in_code_cmd_index orelse return;
     const data_in_code = self.load_commands.items[index].LinkeditData;
 
@@ -623,7 +623,7 @@ pub fn parseDataInCode(self: *Object, allocator: *Allocator) !void {
     }
 }
 
-fn readSection(self: Object, allocator: *Allocator, index: u16) ![]u8 {
+fn readSection(self: Object, allocator: Allocator, index: u16) ![]u8 {
     const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
     const sect = seg.sections.items[index];
     var buffer = try allocator.alloc(u8, @intCast(usize, sect.size));
diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig
index c166aaf432cb..d85aabe63d80 100644
--- a/src/link/MachO/Trie.zig
+++ b/src/link/MachO/Trie.zig
@@ -65,7 +65,7 @@ pub const Node = struct {
         to: *Node,
         label: []u8,
 
-        fn deinit(self: *Edge, allocator: *Allocator) void {
+        fn deinit(self: *Edge, allocator: Allocator) void {
             self.to.deinit(allocator);
             allocator.destroy(self.to);
             allocator.free(self.label);
@@ -75,7 +75,7 @@ pub const Node = struct {
         }
     };
 
-    fn deinit(self: *Node, allocator: *Allocator) void {
+    fn deinit(self: *Node, allocator: Allocator) void {
         for (self.edges.items) |*edge| {
             edge.deinit(allocator);
         }
@@ -83,7 +83,7 @@ pub const Node = struct {
     }
 
     /// Inserts a new node starting from `self`.
-    fn put(self: *Node, allocator: *Allocator, label: []const u8) !*Node {
+    fn put(self: *Node, allocator: Allocator, label: []const u8) !*Node {
         // Check for match with edges from this node.
         for (self.edges.items) |*edge| {
             const match = mem.indexOfDiff(u8, edge.label, label) orelse return edge.to;
@@ -126,7 +126,7 @@ pub const Node = struct {
     }
 
     /// Recursively parses the node from the input byte stream.
-    fn read(self: *Node, allocator: *Allocator, reader: anytype) Trie.ReadError!usize {
+    fn read(self: *Node, allocator: Allocator, reader: anytype) Trie.ReadError!usize {
         self.node_dirty = true;
         const trie_offset = try reader.context.getPos();
         self.trie_offset = trie_offset;
@@ -308,7 +308,7 @@ pub const ExportSymbol = struct {
 /// Insert a symbol into the trie, updating the prefixes in the process.
 /// This operation may change the layout of the trie by splicing edges in
 /// certain circumstances.
-pub fn put(self: *Trie, allocator: *Allocator, symbol: ExportSymbol) !void {
+pub fn put(self: *Trie, allocator: Allocator, symbol: ExportSymbol) !void {
     try self.createRoot(allocator);
     const node = try self.root.?.put(allocator, symbol.name);
     node.terminal_info = .{
@@ -322,7 +322,7 @@ pub fn put(self: *Trie, allocator: *Allocator, symbol: ExportSymbol) !void {
 /// This step performs multiple passes through the trie ensuring
 /// there are no gaps after every `Node` is ULEB128 encoded.
 /// Call this method before trying to `write` the trie to a byte stream.
-pub fn finalize(self: *Trie, allocator: *Allocator) !void {
+pub fn finalize(self: *Trie, allocator: Allocator) !void {
     if (!self.trie_dirty) return;
 
     self.ordered_nodes.shrinkRetainingCapacity(0);
@@ -361,7 +361,7 @@ const ReadError = error{
 };
 
 /// Parse the trie from a byte stream.
-pub fn read(self: *Trie, allocator: *Allocator, reader: anytype) ReadError!usize {
+pub fn read(self: *Trie, allocator: Allocator, reader: anytype) ReadError!usize {
     try self.createRoot(allocator);
     return self.root.?.read(allocator, reader);
 }
@@ -377,7 +377,7 @@ pub fn write(self: Trie, writer: anytype) !u64 {
     return counting_writer.bytes_written;
 }
 
-pub fn deinit(self: *Trie, allocator: *Allocator) void {
+pub fn deinit(self: *Trie, allocator: Allocator) void {
     if (self.root) |root| {
         root.deinit(allocator);
         allocator.destroy(root);
@@ -385,7 +385,7 @@ pub fn deinit(self: *Trie, allocator: *Allocator) void {
     self.ordered_nodes.deinit(allocator);
 }
 
-fn createRoot(self: *Trie, allocator: *Allocator) !void {
+fn createRoot(self: *Trie, allocator: Allocator) !void {
     if (self.root == null) {
         const root = try allocator.create(Node);
         root.* = .{ .base = self };
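
`finalize` needs its multiple passes because a node's ULEB128-encoded size depends on the offsets being encoded, and those offsets move whenever a size changes, so the trie is re-encoded until it reaches a fixed point. For reference, the varint behavior driving that, sketched with `std.leb` (assumed available, as elsewhere in this codebase):

    const std = @import("std");

    test "ULEB128 size depends on magnitude, hence the fixed-point passes" {
        var buf: [10]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try std.leb.writeULEB128(fbs.writer(), @as(u64, 127));
        try std.testing.expectEqual(@as(usize, 1), fbs.getWritten().len);
        fbs.reset();
        try std.leb.writeULEB128(fbs.writer(), @as(u64, 128));
        try std.testing.expectEqual(@as(usize, 2), fbs.getWritten().len);
    }
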
diff --git a/src/link/MachO/commands.zig b/src/link/MachO/commands.zig
index b3a96c80472c..41ea52b9df7f 100644
--- a/src/link/MachO/commands.zig
+++ b/src/link/MachO/commands.zig
@@ -50,7 +50,7 @@ pub const LoadCommand = union(enum) {
     Rpath: GenericCommandWithData(macho.rpath_command),
     Unknown: GenericCommandWithData(macho.load_command),
 
-    pub fn read(allocator: *Allocator, reader: anytype) !LoadCommand {
+    pub fn read(allocator: Allocator, reader: anytype) !LoadCommand {
         const header = try reader.readStruct(macho.load_command);
         var buffer = try allocator.alloc(u8, header.cmdsize);
         defer allocator.free(buffer);
@@ -177,7 +177,7 @@ pub const LoadCommand = union(enum) {
         };
     }
 
-    pub fn deinit(self: *LoadCommand, allocator: *Allocator) void {
+    pub fn deinit(self: *LoadCommand, allocator: Allocator) void {
         return switch (self.*) {
             .Segment => |*x| x.deinit(allocator),
             .Dylinker => |*x| x.deinit(allocator),
@@ -218,7 +218,7 @@ pub const SegmentCommand = struct {
     inner: macho.segment_command_64,
     sections: std.ArrayListUnmanaged(macho.section_64) = .{},
 
-    pub fn read(alloc: *Allocator, reader: anytype) !SegmentCommand {
+    pub fn read(alloc: Allocator, reader: anytype) !SegmentCommand {
         const inner = try reader.readStruct(macho.segment_command_64);
         var segment = SegmentCommand{
             .inner = inner,
@@ -241,7 +241,7 @@ pub const SegmentCommand = struct {
         }
     }
 
-    pub fn deinit(self: *SegmentCommand, alloc: *Allocator) void {
+    pub fn deinit(self: *SegmentCommand, alloc: Allocator) void {
         self.sections.deinit(alloc);
     }
 
@@ -299,7 +299,7 @@ pub fn GenericCommandWithData(comptime Cmd: type) type {
 
         const Self = @This();
 
-        pub fn read(allocator: *Allocator, reader: anytype) !Self {
+        pub fn read(allocator: Allocator, reader: anytype) !Self {
             const inner = try reader.readStruct(Cmd);
             var data = try allocator.alloc(u8, inner.cmdsize - @sizeOf(Cmd));
             errdefer allocator.free(data);
@@ -315,7 +315,7 @@ pub fn GenericCommandWithData(comptime Cmd: type) type {
             try writer.writeAll(self.data);
         }
 
-        pub fn deinit(self: *Self, allocator: *Allocator) void {
+        pub fn deinit(self: *Self, allocator: Allocator) void {
             allocator.free(self.data);
         }
 
@@ -327,7 +327,7 @@ pub fn GenericCommandWithData(comptime Cmd: type) type {
 }
 
 pub fn createLoadDylibCommand(
-    allocator: *Allocator,
+    allocator: Allocator,
     name: []const u8,
     timestamp: u32,
     current_version: u32,
@@ -395,7 +395,7 @@ pub fn sectionIsDontDeadStripIfReferencesLive(sect: macho.section_64) bool {
     return sectionAttrs(sect) & macho.S_ATTR_LIVE_SUPPORT != 0;
 }
 
-fn testRead(allocator: *Allocator, buffer: []const u8, expected: anytype) !void {
+fn testRead(allocator: Allocator, buffer: []const u8, expected: anytype) !void {
     var stream = io.fixedBufferStream(buffer);
     var given = try LoadCommand.read(allocator, stream.reader());
     defer given.deinit(allocator);
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index b25f0953265b..c4c42940b85f 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -132,7 +132,7 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
 
 pub const PtrWidth = enum { p32, p64 };
 
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
     if (options.use_llvm)
         return error.LLVMBackendDoesNotSupportPlan9;
     const sixtyfour_bit: bool = switch (options.target.cpu.arch.ptrBitWidth()) {
@@ -621,7 +621,7 @@ pub fn deinit(self: *Plan9) void {
 
 pub const Export = ?usize;
 pub const base_tag = .plan9;
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
     if (options.use_llvm)
         return error.LLVMBackendDoesNotSupportPlan9;
     assert(options.object_format == .plan9);
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index f9d3f7a1e65f..7fc54d509ec5 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -58,7 +58,7 @@ const DeclGenContext = struct {
     liveness: Liveness,
 };
 
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*SpirV {
     const spirv = try gpa.create(SpirV);
     spirv.* = .{
         .base = .{
@@ -87,7 +87,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
     return spirv;
 }
 
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*SpirV {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*SpirV {
     assert(options.object_format == .spirv);
 
     if (options.use_llvm) return error.LLVM_BackendIsTODO_ForSpirV; // TODO: LLVM Doesn't support SpirV at all.
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 8933bdef9feb..a8606ac27f7c 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -97,7 +97,7 @@ pub const FnData = struct {
     };
 };
 
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
+pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Wasm {
     assert(options.object_format == .wasm);
 
     if (build_options.have_llvm and options.use_llvm) {
@@ -138,7 +138,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
     return wasm_bin;
 }
 
-pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Wasm {
+pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
     const wasm_bin = try gpa.create(Wasm);
     wasm_bin.* = .{
         .base = .{
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index 0055955bde0b..cabd9f1571b4 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -42,7 +42,7 @@ pub const empty: Atom = .{
 };
 
 /// Frees all resources owned by this `Atom`.
-pub fn deinit(self: *Atom, gpa: *Allocator) void {
+pub fn deinit(self: *Atom, gpa: Allocator) void {
     self.relocs.deinit(gpa);
     self.code.deinit(gpa);
 }
diff --git a/src/link/tapi.zig b/src/link/tapi.zig
index 0bde3100ddfb..84257de388e3 100644
--- a/src/link/tapi.zig
+++ b/src/link/tapi.zig
@@ -106,7 +106,7 @@ pub const LibStub = struct {
     /// Typed contents of the tbd file.
     inner: []Tbd,
 
-    pub fn loadFromFile(allocator: *Allocator, file: fs.File) !LibStub {
+    pub fn loadFromFile(allocator: Allocator, file: fs.File) !LibStub {
         const source = try file.readToEndAlloc(allocator, std.math.maxInt(u32));
         defer allocator.free(source);
 
diff --git a/src/link/tapi/parse.zig b/src/link/tapi/parse.zig
index 0c923f961bc1..0c40f613dc04 100644
--- a/src/link/tapi/parse.zig
+++ b/src/link/tapi/parse.zig
@@ -37,7 +37,7 @@ pub const Node = struct {
         return @fieldParentPtr(T, "base", self);
     }
 
-    pub fn deinit(self: *Node, allocator: *Allocator) void {
+    pub fn deinit(self: *Node, allocator: Allocator) void {
         switch (self.tag) {
             .doc => @fieldParentPtr(Node.Doc, "base", self).deinit(allocator),
             .map => @fieldParentPtr(Node.Map, "base", self).deinit(allocator),
@@ -69,7 +69,7 @@ pub const Node = struct {
 
         pub const base_tag: Node.Tag = .doc;
 
-        pub fn deinit(self: *Doc, allocator: *Allocator) void {
+        pub fn deinit(self: *Doc, allocator: Allocator) void {
             if (self.value) |node| {
                 node.deinit(allocator);
                 allocator.destroy(node);
@@ -113,7 +113,7 @@ pub const Node = struct {
             value: *Node,
         };
 
-        pub fn deinit(self: *Map, allocator: *Allocator) void {
+        pub fn deinit(self: *Map, allocator: Allocator) void {
             for (self.values.items) |entry| {
                 entry.value.deinit(allocator);
                 allocator.destroy(entry.value);
@@ -149,7 +149,7 @@ pub const Node = struct {
 
         pub const base_tag: Node.Tag = .list;
 
-        pub fn deinit(self: *List, allocator: *Allocator) void {
+        pub fn deinit(self: *List, allocator: Allocator) void {
             for (self.values.items) |node| {
                 node.deinit(allocator);
                 allocator.destroy(node);
@@ -198,12 +198,12 @@ pub const Node = struct {
 };
 
 pub const Tree = struct {
-    allocator: *Allocator,
+    allocator: Allocator,
     source: []const u8,
     tokens: []Token,
     docs: std.ArrayListUnmanaged(*Node) = .{},
 
-    pub fn init(allocator: *Allocator) Tree {
+    pub fn init(allocator: Allocator) Tree {
         return .{
             .allocator = allocator,
             .source = undefined,
@@ -266,7 +266,7 @@ pub const Tree = struct {
 };
 
 const Parser = struct {
-    allocator: *Allocator,
+    allocator: Allocator,
     tree: *Tree,
     token_it: *TokenIterator,
     scopes: std.ArrayListUnmanaged(Scope) = .{},
diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig
index 25d2c73e8207..4392befb597a 100644
--- a/src/link/tapi/yaml.zig
+++ b/src/link/tapi/yaml.zig
@@ -149,7 +149,7 @@ pub const Value = union(ValueType) {
         };
     }
 
-    fn fromNode(arena: *Allocator, tree: *const Tree, node: *const Node, type_hint: ?ValueType) YamlError!Value {
+    fn fromNode(arena: Allocator, tree: *const Tree, node: *const Node, type_hint: ?ValueType) YamlError!Value {
         if (node.cast(Node.Doc)) |doc| {
             const inner = doc.value orelse {
                 // empty doc
@@ -246,7 +246,7 @@ pub const Yaml = struct {
         }
     }
 
-    pub fn load(allocator: *Allocator, source: []const u8) !Yaml {
+    pub fn load(allocator: Allocator, source: []const u8) !Yaml {
         var arena = ArenaAllocator.init(allocator);
 
         var tree = Tree.init(&arena.allocator);
diff --git a/src/main.zig b/src/main.zig
index b28d01a51a2c..52272db8ef2c 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -165,7 +165,7 @@ pub fn main() anyerror!void {
     return mainArgs(gpa, arena, args);
 }
 
-pub fn mainArgs(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void {
+pub fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     if (args.len <= 1) {
         std.log.info("{s}", .{usage});
         fatal("expected command argument", .{});
@@ -535,7 +535,7 @@ const Emit = union(enum) {
     }
 };
 
-fn optionalStringEnvVar(arena: *Allocator, name: []const u8) !?[]const u8 {
+fn optionalStringEnvVar(arena: Allocator, name: []const u8) !?[]const u8 {
     if (std.process.getEnvVarOwned(arena, name)) |value| {
         return value;
     } else |err| switch (err) {
@@ -554,8 +554,8 @@ const ArgMode = union(enum) {
 };
 
 fn buildOutputType(
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     all_args: []const []const u8,
     arg_mode: ArgMode,
 ) !void {
@@ -2645,7 +2645,7 @@ fn buildOutputType(
 }
 
 fn parseCrossTargetOrReportFatalError(
-    allocator: *Allocator,
+    allocator: Allocator,
     opts: std.zig.CrossTarget.ParseOptions,
 ) !std.zig.CrossTarget {
     var opts_with_diags = opts;
@@ -2686,8 +2686,8 @@ fn parseCrossTargetOrReportFatalError(
 
 fn runOrTest(
     comp: *Compilation,
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     emit_bin_loc: ?Compilation.EmitLoc,
     test_exec_args: []const ?[]const u8,
     self_exe_path: []const u8,
@@ -2818,7 +2818,7 @@ const AfterUpdateHook = union(enum) {
     update: []const u8,
 };
 
-fn updateModule(gpa: *Allocator, comp: *Compilation, hook: AfterUpdateHook) !void {
+fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void {
     try comp.update();
 
     var errors = try comp.getAllErrorsAlloc();
@@ -2872,7 +2872,7 @@ fn updateModule(gpa: *Allocator, comp: *Compilation, hook: AfterUpdateHook) !voi
     }
 }
 
-fn freePkgTree(gpa: *Allocator, pkg: *Package, free_parent: bool) void {
+fn freePkgTree(gpa: Allocator, pkg: *Package, free_parent: bool) void {
     {
         var it = pkg.table.valueIterator();
         while (it.next()) |value| {
@@ -2884,7 +2884,7 @@ fn freePkgTree(gpa: *Allocator, pkg: *Package, free_parent: bool) void {
     }
 }
 
-fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !void {
+fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void {
     if (!build_options.have_llvm)
         fatal("cannot translate-c: compiler built without LLVM extensions", .{});
 
@@ -3031,7 +3031,7 @@ pub const usage_libc =
     \\
 ;
 
-pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void {
+pub fn cmdLibC(gpa: Allocator, args: []const []const u8) !void {
     var input_file: ?[]const u8 = null;
     var target_arch_os_abi: []const u8 = "native";
     {
@@ -3100,8 +3100,8 @@ pub const usage_init =
 ;
 
 pub fn cmdInit(
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     args: []const []const u8,
     output_mode: std.builtin.OutputMode,
 ) !void {
@@ -3196,7 +3196,7 @@ pub const usage_build =
     \\
 ;
 
-pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void {
+pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     var prominent_compile_errors: bool = false;
 
     // We want to release all the locks before executing the child process, so we make a nice
@@ -3436,7 +3436,7 @@ pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !v
     }
 }
 
-fn argvCmd(allocator: *Allocator, argv: []const []const u8) ![]u8 {
+fn argvCmd(allocator: Allocator, argv: []const []const u8) ![]u8 {
     var cmd = std.ArrayList(u8).init(allocator);
     defer cmd.deinit();
     for (argv[0 .. argv.len - 1]) |arg| {
@@ -3448,7 +3448,7 @@ fn argvCmd(allocator: *Allocator, argv: []const []const u8) ![]u8 {
 }
 
 fn readSourceFileToEndAlloc(
-    allocator: *mem.Allocator,
+    allocator: mem.Allocator,
     input: *const fs.File,
     size_hint: ?usize,
 ) ![:0]u8 {
@@ -3518,14 +3518,14 @@ const Fmt = struct {
     any_error: bool,
     check_ast: bool,
     color: Color,
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     out_buffer: std.ArrayList(u8),
 
     const SeenMap = std.AutoHashMap(fs.File.INode, void);
 };
 
-pub fn cmdFmt(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void {
+pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     var color: Color = .auto;
     var stdin_flag: bool = false;
     var check_flag: bool = false;
@@ -3855,8 +3855,8 @@ fn fmtPathFile(
 }
 
 fn printErrMsgToStdErr(
-    gpa: *mem.Allocator,
-    arena: *mem.Allocator,
+    gpa: mem.Allocator,
+    arena: mem.Allocator,
     parse_error: Ast.Error,
     tree: Ast,
     path: []const u8,
@@ -3938,7 +3938,7 @@ extern "c" fn ZigClang_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
 extern "c" fn ZigLlvmAr_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
 
 /// TODO https://github.com/ziglang/zig/issues/3257
-fn punt_to_clang(arena: *Allocator, args: []const []const u8) error{OutOfMemory} {
+fn punt_to_clang(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
     if (!build_options.have_llvm)
         fatal("`zig cc` and `zig c++` unavailable: compiler built without LLVM extensions", .{});
     // Convert the args to the format Clang expects.
@@ -3952,7 +3952,7 @@ fn punt_to_clang(arena: *Allocator, args: []const []const u8) error{OutOfMemory}
 }
 
 /// TODO https://github.com/ziglang/zig/issues/3257
-fn punt_to_llvm_ar(arena: *Allocator, args: []const []const u8) error{OutOfMemory} {
+fn punt_to_llvm_ar(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
     if (!build_options.have_llvm)
         fatal("`zig ar`, `zig dlltool`, `zig ranlib', and `zig lib` unavailable: compiler built without LLVM extensions", .{});
 
@@ -3973,7 +3973,7 @@ fn punt_to_llvm_ar(arena: *Allocator, args: []const []const u8) error{OutOfMemor
 /// * `lld-link` - COFF
 /// * `wasm-ld` - WebAssembly
 /// TODO https://github.com/ziglang/zig/issues/3257
-pub fn punt_to_lld(arena: *Allocator, args: []const []const u8) error{OutOfMemory} {
+pub fn punt_to_lld(arena: Allocator, args: []const []const u8) error{OutOfMemory} {
     if (!build_options.have_llvm)
         fatal("`zig {s}` unavailable: compiler built without LLVM extensions", .{args[0]});
     // Convert the args to the format LLD expects.
@@ -4009,7 +4009,7 @@ pub const ClangArgIterator = struct {
     argv: []const []const u8,
     next_index: usize,
     root_args: ?*Args,
-    allocator: *Allocator,
+    allocator: Allocator,
 
     pub const ZigEquivalent = enum {
         target,
@@ -4069,7 +4069,7 @@ pub const ClangArgIterator = struct {
         argv: []const []const u8,
     };
 
-    fn init(allocator: *Allocator, argv: []const []const u8) ClangArgIterator {
+    fn init(allocator: Allocator, argv: []const []const u8) ClangArgIterator {
         return .{
             .next_index = 2, // `zig cc foo` this points to `foo`
             .has_next = argv.len > 2,
@@ -4308,7 +4308,7 @@ test "fds" {
     gimmeMoreOfThoseSweetSweetFileDescriptors();
 }
 
-fn detectNativeTargetInfo(gpa: *Allocator, cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo {
+fn detectNativeTargetInfo(gpa: Allocator, cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo {
     return std.zig.system.NativeTargetInfo.detect(gpa, cross_target);
 }
 
@@ -4343,8 +4343,8 @@ const usage_ast_check =
 ;
 
 pub fn cmdAstCheck(
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     args: []const []const u8,
 ) !void {
     const Module = @import("Module.zig");
@@ -4513,8 +4513,8 @@ pub fn cmdAstCheck(
 
 /// This is only enabled for debug builds.
 pub fn cmdChangelist(
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     args: []const []const u8,
 ) !void {
     const Module = @import("Module.zig");
diff --git a/src/mingw.zig b/src/mingw.zig
index 8738bcbbcd73..b2628553b9bf 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -252,7 +252,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
 
 fn add_cc_args(
     comp: *Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     args: *std.ArrayList([]const u8),
 ) error{OutOfMemory}!void {
     try args.appendSlice(&[_][]const u8{
@@ -428,7 +428,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
 }
 
 /// This function body is verbose but all it does is test 3 different paths and see if a .def file exists.
-fn findDef(comp: *Compilation, allocator: *Allocator, lib_name: []const u8) ![]u8 {
+fn findDef(comp: *Compilation, allocator: Allocator, lib_name: []const u8) ![]u8 {
     const target = comp.getTarget();
 
     const lib_path = switch (target.cpu.arch) {
diff --git a/src/musl.zig b/src/musl.zig
index d0f11af6043a..b9d00c4b124f 100644
--- a/src/musl.zig
+++ b/src/musl.zig
@@ -310,7 +310,7 @@ const Ext = enum {
     o3,
 };
 
-fn addSrcFile(arena: *Allocator, source_table: *std.StringArrayHashMap(Ext), file_path: []const u8) !void {
+fn addSrcFile(arena: Allocator, source_table: *std.StringArrayHashMap(Ext), file_path: []const u8) !void {
     const ext: Ext = ext: {
         if (mem.endsWith(u8, file_path, ".c")) {
             if (mem.startsWith(u8, file_path, "musl/src/malloc/") or
@@ -344,7 +344,7 @@ fn addSrcFile(arena: *Allocator, source_table: *std.StringArrayHashMap(Ext), fil
 
 fn addCcArgs(
     comp: *Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     args: *std.ArrayList([]const u8),
     want_O3: bool,
 ) error{OutOfMemory}!void {
@@ -394,7 +394,7 @@ fn addCcArgs(
     });
 }
 
-fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) ![]const u8 {
+fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
     const target = comp.getTarget();
     return comp.zig_lib_directory.join(arena, &[_][]const u8{
         "libc", "musl", "crt", archName(target.cpu.arch), basename,
diff --git a/src/print_air.zig b/src/print_air.zig
index dc6a1773e74d..86fc6a63962d 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -8,7 +8,7 @@ const Zir = @import("Zir.zig");
 const Air = @import("Air.zig");
 const Liveness = @import("Liveness.zig");
 
-pub fn dump(gpa: *Allocator, air: Air, zir: Zir, liveness: Liveness) void {
+pub fn dump(gpa: Allocator, air: Air, zir: Zir, liveness: Liveness) void {
     const instruction_bytes = air.instructions.len *
         // Here we don't use @sizeOf(Air.Inst.Data) because it would include
         // the debug safety tag but we want to measure release size.
@@ -60,8 +60,8 @@ pub fn dump(gpa: *Allocator, air: Air, zir: Zir, liveness: Liveness) void {
 }
 
 const Writer = struct {
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     air: Air,
     zir: Zir,
     liveness: Liveness,
diff --git a/src/print_env.zig b/src/print_env.zig
index 8c44e85e6599..15f038c50eed 100644
--- a/src/print_env.zig
+++ b/src/print_env.zig
@@ -4,7 +4,7 @@ const introspect = @import("introspect.zig");
 const Allocator = std.mem.Allocator;
 const fatal = @import("main.zig").fatal;
 
-pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
+pub fn cmdEnv(gpa: Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
     _ = args;
     const self_exe_path = try std.fs.selfExePathAlloc(gpa);
     defer gpa.free(self_exe_path);
diff --git a/src/print_targets.zig b/src/print_targets.zig
index d0a1d5167a86..07335c466897 100644
--- a/src/print_targets.zig
+++ b/src/print_targets.zig
@@ -11,7 +11,7 @@ const introspect = @import("introspect.zig");
 const fatal = @import("main.zig").fatal;
 
 pub fn cmdTargets(
-    allocator: *Allocator,
+    allocator: Allocator,
     args: []const []const u8,
     /// Output stream
     stdout: anytype,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index a3988986f059..9532b33ccd63 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -10,7 +10,7 @@ const LazySrcLoc = Module.LazySrcLoc;
 
 /// Write human-readable, debug formatted ZIR code to a file.
 pub fn renderAsTextToFile(
-    gpa: *Allocator,
+    gpa: Allocator,
     scope_file: *Module.File,
     fs_file: std.fs.File,
 ) !void {
@@ -61,7 +61,7 @@ pub fn renderAsTextToFile(
 }
 
 pub fn renderInstructionContext(
-    gpa: *Allocator,
+    gpa: Allocator,
     block: []const Zir.Inst.Index,
     block_index: usize,
     scope_file: *Module.File,
@@ -94,7 +94,7 @@ pub fn renderInstructionContext(
 }
 
 pub fn renderSingleInstruction(
-    gpa: *Allocator,
+    gpa: Allocator,
     inst: Zir.Inst.Index,
     scope_file: *Module.File,
     parent_decl_node: Ast.Node.Index,
@@ -120,8 +120,8 @@ pub fn renderSingleInstruction(
 }
 
 const Writer = struct {
-    gpa: *Allocator,
-    arena: *Allocator,
+    gpa: Allocator,
+    arena: Allocator,
     file: *Module.File,
     code: Zir,
     indent: u32,
diff --git a/src/register_manager.zig b/src/register_manager.zig
index 47528e53d6af..43e19e2ca30c 100644
--- a/src/register_manager.zig
+++ b/src/register_manager.zig
@@ -254,7 +254,7 @@ const MockRegister2 = enum(u2) {
 
 fn MockFunction(comptime Register: type) type {
     return struct {
-        allocator: *Allocator,
+        allocator: Allocator,
         register_manager: RegisterManager(Self, Register, &Register.callee_preserved_regs) = .{},
         spilled: std.ArrayListUnmanaged(Register) = .{},
 
diff --git a/src/test.zig b/src/test.zig
index 960aac7bc7a9..a9c1905b3636 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -680,7 +680,7 @@ pub const TestContext = struct {
     }
 
     fn runOneCase(
-        allocator: *Allocator,
+        allocator: Allocator,
         root_node: *std.Progress.Node,
         case: Case,
         zig_lib_directory: Compilation.Directory,
diff --git a/src/tracy.zig b/src/tracy.zig
index 3ef2df7ca031..8abd78110fd7 100644
--- a/src/tracy.zig
+++ b/src/tracy.zig
@@ -103,18 +103,18 @@ pub inline fn traceNamed(comptime src: std.builtin.SourceLocation, comptime name
     }
 }
 
-pub fn tracyAllocator(allocator: *std.mem.Allocator) TracyAllocator(null) {
+pub fn tracyAllocator(allocator: std.mem.Allocator) TracyAllocator(null) {
     return TracyAllocator(null).init(allocator);
 }
 
 pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
     return struct {
         allocator: std.mem.Allocator,
-        parent_allocator: *std.mem.Allocator,
+        parent_allocator: std.mem.Allocator,
 
         const Self = @This();
 
-        pub fn init(allocator: *std.mem.Allocator) Self {
+        pub fn init(allocator: std.mem.Allocator) Self {
             return .{
                 .parent_allocator = allocator,
                 .allocator = .{
@@ -124,7 +124,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
             };
         }
 
-        fn allocFn(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
+        fn allocFn(allocator: std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
             const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ret_addr);
             if (result) |data| {
@@ -141,7 +141,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
             return result;
         }
 
-        fn resizeFn(allocator: *std.mem.Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
+        fn resizeFn(allocator: std.mem.Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
             const self = @fieldParentPtr(Self, "allocator", allocator);
 
             if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ret_addr)) |resized_len| {
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 627f7e52503a..109535d0810f 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -305,8 +305,8 @@ const Scope = struct {
 };
 
 pub const Context = struct {
-    gpa: *mem.Allocator,
-    arena: *mem.Allocator,
+    gpa: mem.Allocator,
+    arena: mem.Allocator,
     source_manager: *clang.SourceManager,
     decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .{},
     alias_list: AliasList,
@@ -351,7 +351,7 @@ pub const Context = struct {
 };
 
 pub fn translate(
-    gpa: *mem.Allocator,
+    gpa: mem.Allocator,
     args_begin: [*]?[*]const u8,
     args_end: [*]?[*]const u8,
     errors: *[]ClangErrMsg,
@@ -1448,7 +1448,7 @@ fn makeShuffleMask(c: *Context, scope: *Scope, expr: *const clang.ShuffleVectorE
 }
 
 /// @typeInfo(@TypeOf(vec_node)).Vector.
-fn vectorTypeInfo(arena: *mem.Allocator, vec_node: Node, field: []const u8) TransError!Node {
+fn vectorTypeInfo(arena: mem.Allocator, vec_node: Node, field: []const u8) TransError!Node {
     const typeof_call = try Tag.typeof.create(arena, vec_node);
     const typeinfo_call = try Tag.typeinfo.create(arena, typeof_call);
     const vector_type_info = try Tag.field_access.create(arena, .{ .lhs = typeinfo_call, .field_name = "Vector" });
@@ -1536,7 +1536,7 @@ fn transOffsetOfExpr(
 /// will become very large positive numbers but that is ok since we only use this in
 /// pointer arithmetic expressions, where wraparound will ensure we get the correct value.
 /// node -> @bitCast(usize, @intCast(isize, node))
-fn usizeCastForWrappingPtrArithmetic(gpa: *mem.Allocator, node: Node) TransError!Node {
+fn usizeCastForWrappingPtrArithmetic(gpa: mem.Allocator, node: Node) TransError!Node {
     const intcast_node = try Tag.int_cast.create(gpa, .{
         .lhs = try Tag.type.create(gpa, "isize"),
         .rhs = node,
@@ -5072,7 +5072,7 @@ const PatternList = struct {
     };
 
     /// Assumes that `ms` represents a tokenized function-like macro.
-    fn buildArgsHash(allocator: *mem.Allocator, ms: MacroSlicer, hash: *ArgsPositionMap) MacroProcessingError!void {
+    fn buildArgsHash(allocator: mem.Allocator, ms: MacroSlicer, hash: *ArgsPositionMap) MacroProcessingError!void {
         assert(ms.tokens.len > 2);
         assert(ms.tokens[0].id == .Identifier);
         assert(ms.tokens[1].id == .LParen);
@@ -5098,7 +5098,7 @@ const PatternList = struct {
         impl: []const u8,
         args_hash: ArgsPositionMap,
 
-        fn init(self: *Pattern, allocator: *mem.Allocator, template: [2][]const u8) Error!void {
+        fn init(self: *Pattern, allocator: mem.Allocator, template: [2][]const u8) Error!void {
             const source = template[0];
             const impl = template[1];
 
@@ -5120,7 +5120,7 @@ const PatternList = struct {
             };
         }
 
-        fn deinit(self: *Pattern, allocator: *mem.Allocator) void {
+        fn deinit(self: *Pattern, allocator: mem.Allocator) void {
             self.args_hash.deinit(allocator);
             allocator.free(self.tokens);
         }
@@ -5171,7 +5171,7 @@ const PatternList = struct {
         }
     };
 
-    fn init(allocator: *mem.Allocator) Error!PatternList {
+    fn init(allocator: mem.Allocator) Error!PatternList {
         const patterns = try allocator.alloc(Pattern, templates.len);
         for (templates) |template, i| {
             try patterns[i].init(allocator, template);
@@ -5179,12 +5179,12 @@ const PatternList = struct {
         return PatternList{ .patterns = patterns };
     }
 
-    fn deinit(self: *PatternList, allocator: *mem.Allocator) void {
+    fn deinit(self: *PatternList, allocator: mem.Allocator) void {
         for (self.patterns) |*pattern| pattern.deinit(allocator);
         allocator.free(self.patterns);
     }
 
-    fn match(self: PatternList, allocator: *mem.Allocator, ms: MacroSlicer) Error!?Pattern {
+    fn match(self: PatternList, allocator: mem.Allocator, ms: MacroSlicer) Error!?Pattern {
         var args_hash: ArgsPositionMap = .{};
         defer args_hash.deinit(allocator);
 
@@ -5211,7 +5211,7 @@ const MacroSlicer = struct {
 test "Macro matching" {
     const helper = struct {
         const MacroFunctions = @import("std").zig.c_translation.Macros;
-        fn checkMacro(allocator: *mem.Allocator, pattern_list: PatternList, source: []const u8, comptime expected_match: ?[]const u8) !void {
+        fn checkMacro(allocator: mem.Allocator, pattern_list: PatternList, source: []const u8, comptime expected_match: ?[]const u8) !void {
             var tok_list = std.ArrayList(CToken).init(allocator);
             defer tok_list.deinit();
             try tokenizeMacro(source, &tok_list);
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index a4e64e1966b9..96de020b0c65 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -378,7 +378,7 @@ pub const Node = extern union {
             return .{ .tag_if_small_enough = @enumToInt(t) };
         }
 
-        pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!Node {
+        pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!Node {
             const ptr = try ally.create(t.Type());
             ptr.* = .{
                 .base = .{ .tag = t },
@@ -717,7 +717,7 @@ pub const Payload = struct {
 
 /// Converts the nodes into a Zig Ast.
 /// Caller must free the source slice.
-pub fn render(gpa: *Allocator, nodes: []const Node) !std.zig.Ast {
+pub fn render(gpa: Allocator, nodes: []const Node) !std.zig.Ast {
     var ctx = Context{
         .gpa = gpa,
         .buf = std.ArrayList(u8).init(gpa),
@@ -783,7 +783,7 @@ const TokenIndex = std.zig.Ast.TokenIndex;
 const TokenTag = std.zig.Token.Tag;
 
 const Context = struct {
-    gpa: *Allocator,
+    gpa: Allocator,
     buf: std.ArrayList(u8) = .{},
     nodes: std.zig.Ast.NodeList = .{},
     extra_data: std.ArrayListUnmanaged(std.zig.Ast.Node.Index) = .{},
diff --git a/src/type.zig b/src/type.zig
index 33add3e0d1f6..979c5251d755 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -728,7 +728,7 @@ pub const Type = extern union {
         }
     };
 
-    pub fn copy(self: Type, allocator: *Allocator) error{OutOfMemory}!Type {
+    pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type {
         if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
             return Type{ .tag_if_small_enough = self.tag_if_small_enough };
         } else switch (self.ptr_otherwise.tag) {
@@ -905,7 +905,7 @@ pub const Type = extern union {
         }
     }
 
-    fn copyPayloadShallow(self: Type, allocator: *Allocator, comptime T: type) error{OutOfMemory}!Type {
+    fn copyPayloadShallow(self: Type, allocator: Allocator, comptime T: type) error{OutOfMemory}!Type {
         const payload = self.cast(T).?;
         const new_payload = try allocator.create(T);
         new_payload.* = payload.*;
@@ -1198,7 +1198,7 @@ pub const Type = extern union {
     }
 
     /// Returns a name suitable for `@typeName`.
-    pub fn nameAlloc(ty: Type, arena: *Allocator) Allocator.Error![:0]const u8 {
+    pub fn nameAlloc(ty: Type, arena: Allocator) Allocator.Error![:0]const u8 {
         const t = ty.tag();
         switch (t) {
             .inferred_alloc_const => unreachable,
@@ -1421,7 +1421,7 @@ pub const Type = extern union {
         };
     }
 
-    pub fn toValue(self: Type, allocator: *Allocator) Allocator.Error!Value {
+    pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
         switch (self.tag()) {
             .u1 => return Value.initTag(.u1_type),
             .u8 => return Value.initTag(.u8_type),
@@ -2676,7 +2676,7 @@ pub const Type = extern union {
     /// For [*]T, returns *T
     /// For []T, returns *T
     /// Handles const-ness and address spaces in particular.
-    pub fn elemPtrType(ptr_ty: Type, arena: *Allocator) !Type {
+    pub fn elemPtrType(ptr_ty: Type, arena: Allocator) !Type {
         return try Type.ptr(arena, .{
             .pointee_type = ptr_ty.elemType2(),
             .mutable = ptr_ty.ptrIsMutable(),
@@ -2731,7 +2731,7 @@ pub const Type = extern union {
 
     /// Asserts that the type is an optional.
     /// Same as `optionalChild` but allocates the buffer if needed.
-    pub fn optionalChildAlloc(ty: Type, allocator: *Allocator) !Type {
+    pub fn optionalChildAlloc(ty: Type, allocator: Allocator) !Type {
         switch (ty.tag()) {
             .optional => return ty.castTag(.optional).?.data,
             .optional_single_mut_pointer => {
@@ -3379,7 +3379,7 @@ pub const Type = extern union {
     }
 
     /// Asserts that self.zigTypeTag() == .Int.
-    pub fn minInt(self: Type, arena: *Allocator, target: Target) !Value {
+    pub fn minInt(self: Type, arena: Allocator, target: Target) !Value {
         assert(self.zigTypeTag() == .Int);
         const info = self.intInfo(target);
 
@@ -3404,7 +3404,7 @@ pub const Type = extern union {
     }
 
     /// Asserts that self.zigTypeTag() == .Int.
-    pub fn maxInt(self: Type, arena: *Allocator, target: Target) !Value {
+    pub fn maxInt(self: Type, arena: Allocator, target: Target) !Value {
         assert(self.zigTypeTag() == .Int);
         const info = self.intInfo(target);
 
@@ -4008,7 +4008,7 @@ pub const Type = extern union {
             return .{ .tag_if_small_enough = t };
         }
 
-        pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
+        pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
             const p = try ally.create(t.Type());
             p.* = .{
                 .base = .{ .tag = t },
@@ -4104,7 +4104,7 @@ pub const Type = extern union {
                 functions: std.AutoHashMapUnmanaged(*Module.Fn, void),
                 is_anyerror: bool,
 
-                pub fn addErrorSet(self: *Data, gpa: *Allocator, err_set_ty: Type) !void {
+                pub fn addErrorSet(self: *Data, gpa: Allocator, err_set_ty: Type) !void {
                     switch (err_set_ty.tag()) {
                         .error_set => {
                             const names = err_set_ty.castTag(.error_set).?.data.names();
@@ -4225,7 +4225,7 @@ pub const Type = extern union {
     pub const @"type" = initTag(.type);
     pub const @"anyerror" = initTag(.anyerror);
 
-    pub fn ptr(arena: *Allocator, d: Payload.Pointer.Data) !Type {
+    pub fn ptr(arena: Allocator, d: Payload.Pointer.Data) !Type {
         assert(d.host_size == 0 or d.bit_offset < d.host_size * 8);
 
         if (d.sentinel != null or d.@"align" != 0 or d.@"addrspace" != .generic or
@@ -4260,7 +4260,7 @@ pub const Type = extern union {
     }
 
     pub fn array(
-        arena: *Allocator,
+        arena: Allocator,
         len: u64,
         sent: ?Value,
         elem_type: Type,
@@ -4289,14 +4289,14 @@ pub const Type = extern union {
         });
     }
 
-    pub fn vector(arena: *Allocator, len: u64, elem_type: Type) Allocator.Error!Type {
+    pub fn vector(arena: Allocator, len: u64, elem_type: Type) Allocator.Error!Type {
         return Tag.vector.create(arena, .{
             .len = len,
             .elem_type = elem_type,
         });
     }
 
-    pub fn optional(arena: *Allocator, child_type: Type) Allocator.Error!Type {
+    pub fn optional(arena: Allocator, child_type: Type) Allocator.Error!Type {
         switch (child_type.tag()) {
             .single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
                 arena,
@@ -4317,7 +4317,7 @@ pub const Type = extern union {
         return @intCast(u16, base + @boolToInt(upper < max));
     }
 
-    pub fn smallestUnsignedInt(arena: *Allocator, max: u64) !Type {
+    pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type {
         const bits = smallestUnsignedBits(max);
         return switch (bits) {
             1 => initTag(.u1),
diff --git a/src/value.zig b/src/value.zig
index 1f08a8e3369b..02e27cd49855 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -297,7 +297,7 @@ pub const Value = extern union {
             };
         }
 
-        pub fn create(comptime t: Tag, ally: *Allocator, data: Data(t)) error{OutOfMemory}!Value {
+        pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!Value {
             const ptr = try ally.create(t.Type());
             ptr.* = .{
                 .base = .{ .tag = t },
@@ -363,7 +363,7 @@ pub const Value = extern union {
 
     /// It's intentional that this function is not passed a corresponding Type, so that
     /// a Value can be copied from a Sema to a Decl prior to resolving struct/union field types.
-    pub fn copy(self: Value, arena: *Allocator) error{OutOfMemory}!Value {
+    pub fn copy(self: Value, arena: Allocator) error{OutOfMemory}!Value {
         if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
             return Value{ .tag_if_small_enough = self.tag_if_small_enough };
         } else switch (self.ptr_otherwise.tag) {
@@ -578,7 +578,7 @@ pub const Value = extern union {
         }
     }
 
-    fn copyPayloadShallow(self: Value, arena: *Allocator, comptime T: type) error{OutOfMemory}!Value {
+    fn copyPayloadShallow(self: Value, arena: Allocator, comptime T: type) error{OutOfMemory}!Value {
         const payload = self.cast(T).?;
         const new_payload = try arena.create(T);
         new_payload.* = payload.*;
@@ -747,7 +747,7 @@ pub const Value = extern union {
 
     /// Asserts that the value is representable as an array of bytes.
     /// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
-    pub fn toAllocatedBytes(val: Value, ty: Type, allocator: *Allocator) ![]u8 {
+    pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator) ![]u8 {
         switch (val.tag()) {
             .bytes => {
                 const bytes = val.castTag(.bytes).?.data;
@@ -1035,7 +1035,7 @@ pub const Value = extern union {
         }
     }
 
-    pub fn readFromMemory(ty: Type, target: Target, buffer: []const u8, arena: *Allocator) !Value {
+    pub fn readFromMemory(ty: Type, target: Target, buffer: []const u8, arena: Allocator) !Value {
         switch (ty.zigTypeTag()) {
             .Int => {
                 const int_info = ty.intInfo(target);
@@ -1185,7 +1185,7 @@ pub const Value = extern union {
         }
     }
 
-    pub fn popCount(val: Value, ty: Type, target: Target, arena: *Allocator) !Value {
+    pub fn popCount(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
         assert(!val.isUndef());
 
         const info = ty.intInfo(target);
@@ -1273,7 +1273,7 @@ pub const Value = extern union {
 
     /// Converts an integer or a float to a float. May result in a loss of information.
     /// Caller can find out by equality checking the result against the operand.
-    pub fn floatCast(self: Value, arena: *Allocator, dest_ty: Type) !Value {
+    pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type) !Value {
         switch (dest_ty.tag()) {
             .f16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
             .f32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
@@ -1678,7 +1678,7 @@ pub const Value = extern union {
 
     /// Asserts the value is a single-item pointer to an array, or an array,
     /// or an unknown-length pointer, and returns the element value at the index.
-    pub fn elemValue(val: Value, arena: *Allocator, index: usize) !Value {
+    pub fn elemValue(val: Value, arena: Allocator, index: usize) !Value {
         return elemValueAdvanced(val, index, arena, undefined);
     }
 
@@ -1691,7 +1691,7 @@ pub const Value = extern union {
     pub fn elemValueAdvanced(
         val: Value,
         index: usize,
-        arena: ?*Allocator,
+        arena: ?Allocator,
         buffer: *ElemValueBuffer,
     ) error{OutOfMemory}!Value {
         switch (val.tag()) {
@@ -1732,7 +1732,7 @@ pub const Value = extern union {
         }
     }
 
-    pub fn fieldValue(val: Value, allocator: *Allocator, index: usize) error{OutOfMemory}!Value {
+    pub fn fieldValue(val: Value, allocator: Allocator, index: usize) error{OutOfMemory}!Value {
         _ = allocator;
         switch (val.tag()) {
             .@"struct" => {
@@ -1760,7 +1760,7 @@ pub const Value = extern union {
     }
 
     /// Returns a pointer to the element value at the index.
-    pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value {
+    pub fn elemPtr(self: Value, allocator: Allocator, index: usize) !Value {
         switch (self.tag()) {
             .elem_ptr => {
                 const elem_ptr = self.castTag(.elem_ptr).?.data;
@@ -1874,7 +1874,7 @@ pub const Value = extern union {
         };
     }
 
-    pub fn intToFloat(val: Value, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+    pub fn intToFloat(val: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
         switch (val.tag()) {
             .undef, .zero, .one => return val,
             .the_only_possible_value => return Value.initTag(.zero), // for i0, u0
@@ -1898,7 +1898,7 @@ pub const Value = extern union {
         }
     }
 
-    fn intToFloatInner(x: anytype, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+    fn intToFloatInner(x: anytype, arena: Allocator, dest_ty: Type, target: Target) !Value {
         switch (dest_ty.floatBits(target)) {
             16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)),
             32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)),
@@ -1908,7 +1908,7 @@ pub const Value = extern union {
         }
     }
 
-    fn floatToValue(float: f128, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+    fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value {
         switch (dest_ty.floatBits(target)) {
             16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
             32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
@@ -1918,7 +1918,7 @@ pub const Value = extern union {
         }
     }
 
-    pub fn floatToInt(val: Value, arena: *Allocator, dest_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
+    pub fn floatToInt(val: Value, arena: Allocator, dest_ty: Type, target: Target) error{ FloatCannotFit, OutOfMemory }!Value {
         const Limb = std.math.big.Limb;
 
         var value = val.toFloat(f64); // TODO: f128 ?
@@ -1969,7 +1969,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         ty: Type,
-        arena: *Allocator,
+        arena: Allocator,
         target: Target,
     ) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -1993,7 +1993,7 @@ pub const Value = extern union {
         return fromBigInt(arena, result_bigint.toConst());
     }
 
-    fn fromBigInt(arena: *Allocator, big_int: BigIntConst) !Value {
+    fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
         if (big_int.positive) {
             if (big_int.to(u64)) |x| {
                 return Value.Tag.int_u64.create(arena, x);
@@ -2014,7 +2014,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         ty: Type,
-        arena: *Allocator,
+        arena: Allocator,
         target: Target,
     ) !Value {
         assert(!lhs.isUndef());
@@ -2040,7 +2040,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         ty: Type,
-        arena: *Allocator,
+        arena: Allocator,
         target: Target,
     ) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -2069,7 +2069,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         ty: Type,
-        arena: *Allocator,
+        arena: Allocator,
         target: Target,
     ) !Value {
         assert(!lhs.isUndef());
@@ -2095,7 +2095,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         ty: Type,
-        arena: *Allocator,
+        arena: Allocator,
         target: Target,
     ) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -2129,7 +2129,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         ty: Type,
-        arena: *Allocator,
+        arena: Allocator,
         target: Target,
     ) !Value {
         assert(!lhs.isUndef());
@@ -2185,7 +2185,7 @@ pub const Value = extern union {
     }
 
     /// operands must be integers; handles undefined.
-    pub fn bitwiseNot(val: Value, ty: Type, arena: *Allocator, target: Target) !Value {
+    pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, target: Target) !Value {
         if (val.isUndef()) return Value.initTag(.undef);
 
         const info = ty.intInfo(target);
@@ -2205,7 +2205,7 @@ pub const Value = extern union {
     }
 
     /// operands must be integers; handles undefined. 
-    pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+    pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: Allocator) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
 
         // TODO is this a performance issue? maybe we should try the operation without
@@ -2225,7 +2225,7 @@ pub const Value = extern union {
     }
 
     /// operands must be integers; handles undefined. 
-    pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: *Allocator, target: Target) !Value {
+    pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, target: Target) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
 
         const anded = try bitwiseAnd(lhs, rhs, arena);
@@ -2239,7 +2239,7 @@ pub const Value = extern union {
     }
 
     /// operands must be integers; handles undefined. 
-    pub fn bitwiseOr(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+    pub fn bitwiseOr(lhs: Value, rhs: Value, arena: Allocator) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
 
         // TODO is this a performance issue? maybe we should try the operation without
@@ -2258,7 +2258,7 @@ pub const Value = extern union {
     }
 
     /// operands must be integers; handles undefined. 
-    pub fn bitwiseXor(lhs: Value, rhs: Value, arena: *Allocator) !Value {
+    pub fn bitwiseXor(lhs: Value, rhs: Value, arena: Allocator) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
 
         // TODO is this a performance issue? maybe we should try the operation without
@@ -2277,7 +2277,7 @@ pub const Value = extern union {
         return fromBigInt(arena, result_bigint.toConst());
     }
 
-    pub fn intAdd(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn intAdd(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2293,7 +2293,7 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_bigint.toConst());
     }
 
-    pub fn intSub(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn intSub(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2309,7 +2309,7 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_bigint.toConst());
     }
 
-    pub fn intDiv(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn intDiv(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2334,7 +2334,7 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_q.toConst());
     }
 
-    pub fn intDivFloor(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn intDivFloor(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2359,7 +2359,7 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_q.toConst());
     }
 
-    pub fn intRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn intRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2386,7 +2386,7 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_r.toConst());
     }
 
-    pub fn intMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn intMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2422,21 +2422,21 @@ pub const Value = extern union {
         };
     }
 
-    pub fn floatRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn floatRem(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         _ = lhs;
         _ = rhs;
         _ = allocator;
         @panic("TODO implement Value.floatRem");
     }
 
-    pub fn floatMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn floatMod(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         _ = lhs;
         _ = rhs;
         _ = allocator;
         @panic("TODO implement Value.floatMod");
     }
 
-    pub fn intMul(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn intMul(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2457,7 +2457,7 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_bigint.toConst());
     }
 
-    pub fn intTrunc(val: Value, allocator: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
+    pub fn intTrunc(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16) !Value {
         var val_space: Value.BigIntSpace = undefined;
         const val_bigint = val.toBigInt(&val_space);
 
@@ -2471,7 +2471,7 @@ pub const Value = extern union {
         return fromBigInt(allocator, result_bigint.toConst());
     }
 
-    pub fn shl(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn shl(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2494,7 +2494,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         ty: Type,
-        arena: *Allocator,
+        arena: Allocator,
         target: Target,
     ) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
@@ -2517,7 +2517,7 @@ pub const Value = extern union {
         return fromBigInt(arena, result_bigint.toConst());
     }
 
-    pub fn shr(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+    pub fn shr(lhs: Value, rhs: Value, allocator: Allocator) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -2540,7 +2540,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         float_type: Type,
-        arena: *Allocator,
+        arena: Allocator,
     ) !Value {
         switch (float_type.tag()) {
             .f16 => {
@@ -2571,7 +2571,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         float_type: Type,
-        arena: *Allocator,
+        arena: Allocator,
     ) !Value {
         switch (float_type.tag()) {
             .f16 => {
@@ -2602,7 +2602,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         float_type: Type,
-        arena: *Allocator,
+        arena: Allocator,
     ) !Value {
         switch (float_type.tag()) {
             .f16 => {
@@ -2633,7 +2633,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         float_type: Type,
-        arena: *Allocator,
+        arena: Allocator,
     ) !Value {
         switch (float_type.tag()) {
             .f16 => {
@@ -2664,7 +2664,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         float_type: Type,
-        arena: *Allocator,
+        arena: Allocator,
     ) !Value {
         switch (float_type.tag()) {
             .f16 => {
@@ -2695,7 +2695,7 @@ pub const Value = extern union {
         lhs: Value,
         rhs: Value,
         float_type: Type,
-        arena: *Allocator,
+        arena: Allocator,
     ) !Value {
         switch (float_type.tag()) {
             .f16 => {
diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig
index 0ef8d4f4d474..18906cb6c710 100644
--- a/src/wasi_libc.zig
+++ b/src/wasi_libc.zig
@@ -243,7 +243,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
     }
 }
 
-fn sanitize(arena: *Allocator, file_path: []const u8) ![]const u8 {
+fn sanitize(arena: Allocator, file_path: []const u8) ![]const u8 {
     // TODO do this at comptime on the comptime data rather than at runtime
     // probably best to wait until self-hosted is done and our comptime execution
     // is faster and uses less memory.
@@ -261,7 +261,7 @@ fn sanitize(arena: *Allocator, file_path: []const u8) ![]const u8 {
 
 fn addCCArgs(
     comp: *Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     args: *std.ArrayList([]const u8),
     want_O3: bool,
 ) error{OutOfMemory}!void {
@@ -292,7 +292,7 @@ fn addCCArgs(
 
 fn addLibcBottomHalfIncludes(
     comp: *Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     args: *std.ArrayList([]const u8),
 ) error{OutOfMemory}!void {
     try args.appendSlice(&[_][]const u8{
@@ -328,7 +328,7 @@ fn addLibcBottomHalfIncludes(
 
 fn addLibcTopHalfIncludes(
     comp: *Compilation,
-    arena: *Allocator,
+    arena: Allocator,
     args: *std.ArrayList([]const u8),
 ) error{OutOfMemory}!void {
     try args.appendSlice(&[_][]const u8{
diff --git a/test/behavior/async_fn.zig b/test/behavior/async_fn.zig
index cd4dc1c7dc1a..b423ce8d1f83 100644
--- a/test/behavior/async_fn.zig
+++ b/test/behavior/async_fn.zig
@@ -713,7 +713,7 @@ fn testAsyncAwaitTypicalUsage(
         }
 
         var global_download_frame: anyframe = undefined;
-        fn fetchUrl(allocator: *std.mem.Allocator, url: []const u8) anyerror![]u8 {
+        fn fetchUrl(allocator: std.mem.Allocator, url: []const u8) anyerror![]u8 {
             _ = url;
             const result = try allocator.dupe(u8, "expected download text");
             errdefer allocator.free(result);
@@ -727,7 +727,7 @@ fn testAsyncAwaitTypicalUsage(
         }
 
         var global_file_frame: anyframe = undefined;
-        fn readFile(allocator: *std.mem.Allocator, filename: []const u8) anyerror![]u8 {
+        fn readFile(allocator: std.mem.Allocator, filename: []const u8) anyerror![]u8 {
             _ = filename;
             const result = try allocator.dupe(u8, "expected file text");
             errdefer allocator.free(result);
@@ -912,7 +912,7 @@ test "recursive async function" {
 
 fn recursiveAsyncFunctionTest(comptime suspending_implementation: bool) type {
     return struct {
-        fn fib(allocator: *std.mem.Allocator, x: u32) error{OutOfMemory}!u32 {
+        fn fib(allocator: std.mem.Allocator, x: u32) error{OutOfMemory}!u32 {
             if (x <= 1) return x;
 
             if (suspending_implementation) {
diff --git a/test/cli.zig b/test/cli.zig
index 837aef422963..3f50ebe4039f 100644
--- a/test/cli.zig
+++ b/test/cli.zig
@@ -5,7 +5,7 @@ const process = std.process;
 const fs = std.fs;
 const ChildProcess = std.ChildProcess;
 
-var a: *std.mem.Allocator = undefined;
+var a: std.mem.Allocator = undefined;
 
 pub fn main() !void {
     var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
diff --git a/tools/merge_anal_dumps.zig b/tools/merge_anal_dumps.zig
index d77869868596..648a76ebfb63 100644
--- a/tools/merge_anal_dumps.zig
+++ b/tools/merge_anal_dumps.zig
@@ -160,7 +160,7 @@ const Dump = struct {
     const ErrorMap = std.HashMap(Error, usize, Error.hash, Error.eql, 80);
     const TypeMap = std.HashMap(Type, usize, Type.hash, Type.eql, 80);
 
-    fn init(allocator: *mem.Allocator) Dump {
+    fn init(allocator: mem.Allocator) Dump {
         return Dump{
             .targets = std.ArrayList([]const u8).init(allocator),
             .file_list = std.ArrayList([]const u8).init(allocator),
@@ -434,7 +434,7 @@ const Dump = struct {
         try jw.endObject();
     }
 
-    fn a(self: Dump) *mem.Allocator {
+    fn a(self: Dump) mem.Allocator {
         return self.targets.allocator;
     }
 
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index f4632d001728..2eccb0ee1b4c 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -1244,7 +1244,7 @@ fn asciiLessThan(context: void, a: []const u8, b: []const u8) bool {
     return std.ascii.lessThanIgnoreCase(a, b);
 }
 
-fn llvmNameToZigName(arena: *mem.Allocator, llvm_name: []const u8) ![]const u8 {
+fn llvmNameToZigName(arena: mem.Allocator, llvm_name: []const u8) ![]const u8 {
     const duped = try arena.dupe(u8, llvm_name);
     for (duped) |*byte| switch (byte.*) {
         '-', '.' => byte.* = '_',
@@ -1254,7 +1254,7 @@ fn llvmNameToZigName(arena: *mem.Allocator, llvm_name: []const u8) ![]const u8 {
 }
 
 fn llvmNameToZigNameOmit(
-    arena: *mem.Allocator,
+    arena: mem.Allocator,
     llvm_target: LlvmTarget,
     llvm_name: []const u8,
 ) !?[]const u8 {
@@ -1279,7 +1279,7 @@ fn hasSuperclass(obj: *json.ObjectMap, class_name: []const u8) bool {
 }
 
 fn pruneFeatures(
-    arena: *mem.Allocator,
+    arena: mem.Allocator,
     features_table: std.StringHashMap(Feature),
     deps_set: *std.StringHashMap(void),
 ) !void {
diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig
index 69e8237f98b4..756d311ecc73 100644
--- a/tools/update_spirv_features.zig
+++ b/tools/update_spirv_features.zig
@@ -216,7 +216,7 @@ pub fn main() !void {
 /// The *.grammar.json in SPIRV-Headers should have most of these as well, but with this we're sure to get only the actually
 /// registered ones.
 /// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
-fn gather_extensions(allocator: *Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
+fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
     const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
     var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
     defer extensions_dir.close();
@@ -286,7 +286,7 @@ fn insertVersion(versions: *std.ArrayList(Version), version: ?[]const u8) !void
     try versions.append(ver);
 }
 
-fn gatherVersions(allocator: *Allocator, registry: g.CoreRegistry) ![]const Version {
+fn gatherVersions(allocator: Allocator, registry: g.CoreRegistry) ![]const Version {
     // Expected number of versions is small
     var versions = std.ArrayList(Version).init(allocator);
 

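Taken together, the hunks above make one mechanical change: `std.mem.Allocator` is now passed around by value as a small interface struct instead of through a `*Allocator` pointer. A minimal caller-side sketch of the new convention (the `concat` helper is illustrative, not part of the patch; it mirrors the langref example updated later in this series):

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // Library code now accepts the interface struct by value.
    fn concat(gpa: Allocator, a: []const u8, b: []const u8) ![]u8 {
        const result = try gpa.alloc(u8, a.len + b.len);
        std.mem.copy(u8, result, a);
        std.mem.copy(u8, result[a.len..], b);
        return result;
    }

    test "Allocator is passed by value" {
        const gpa = std.testing.allocator; // an Allocator value; no `&` needed
        const joined = try concat(gpa, "foo", "bar");
        defer gpa.free(joined);
        try std.testing.expectEqualSlices(u8, "foobar", joined);
    }

Concrete allocators keep their state; only the handle that callers pass around changes shape.
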
From 47bc13bc597622fc8deffa1c1a45d47dac51eeb0 Mon Sep 17 00:00:00 2001
From: Lee Cannon 
Date: Fri, 29 Oct 2021 00:41:58 +0100
Subject: [PATCH 02/10] allocgate: don't use a dummy temporary for stateless
 allocators

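The previous commit initialized the stateless allocators (`c_allocator`, `raw_c_allocator`, `page_allocator`, `failAllocator`) through a comptime `var tmp: u1 = 0` dummy, purely so that `Allocator.init` had an address to capture. Since the callbacks of a stateless allocator never read their state pointer, the interface struct can instead be built directly with `ptr` left undefined. A minimal sketch of the pattern applied below (field and callback names as used in this series):

    // Before: a dummy one-bit state object, only so there was something to point at.
    //     var tmp: u1 = 0;
    //     break :blk Allocator.init(&tmp, CAllocator.alloc, CAllocator.resize);

    // After: the callbacks take `_: *c_void` and ignore it, so `ptr` is never
    // dereferenced and can be left undefined.
    pub const c_allocator = Allocator{
        .ptr = undefined,
        .allocFn = CAllocator.alloc,
        .resizeFn = CAllocator.resize,
    };
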
---
 lib/std/heap.zig | 58 +++++++++++++++++++++++-------------------------
 lib/std/mem.zig  | 11 +++++----
 2 files changed, 33 insertions(+), 36 deletions(-)

diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index c265fc240861..213953553f28 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -97,7 +97,7 @@ const CAllocator = struct {
     }
 
     fn alloc(
-        _: *u1,
+        _: *c_void,
         len: usize,
         alignment: u29,
         len_align: u29,
@@ -123,7 +123,7 @@ const CAllocator = struct {
     }
 
     fn resize(
-        _: *u1,
+        _: *c_void,
         buf: []u8,
         buf_align: u29,
         new_len: usize,
@@ -152,11 +152,10 @@ const CAllocator = struct {
 /// Supports the full Allocator interface, including alignment, and exploiting
 /// `malloc_usable_size` if available. For an allocator that directly calls
 /// `malloc`/`free`, see `raw_c_allocator`.
-pub const c_allocator = blk: {
-    // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
-    // allowing the use of `*void` but it would still be ugly
-    var tmp: u1 = 0;
-    break :blk Allocator.init(&tmp, CAllocator.alloc, CAllocator.resize);
+pub const c_allocator = Allocator{
+    .ptr = undefined,
+    .allocFn = CAllocator.alloc,
+    .resizeFn = CAllocator.resize,
 };
 
 /// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
@@ -164,15 +163,14 @@ pub const c_allocator = blk: {
 /// This allocator is safe to use as the backing allocator with
 /// `ArenaAllocator` for example and is more optimal in such a case
 /// than `c_allocator`.
-pub const raw_c_allocator = blk: {
-    // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
-    // allowing the use of `*void` but it would still be ugly
-    var tmp: u1 = 0;
-    break :blk Allocator.init(&tmp, rawCAlloc, rawCResize);
+pub const raw_c_allocator = Allocator{
+    .ptr = undefined,
+    .allocFn = rawCAlloc,
+    .resizeFn = rawCResize,
 };
 
 fn rawCAlloc(
-    _: *u1,
+    _: *c_void,
     len: usize,
     ptr_align: u29,
     len_align: u29,
@@ -186,7 +184,7 @@ fn rawCAlloc(
 }
 
 fn rawCResize(
-    _: *u1,
+    _: *c_void,
     buf: []u8,
     old_align: u29,
     new_len: usize,
@@ -208,19 +206,19 @@ fn rawCResize(
 /// This allocator makes a syscall directly for every allocation and free.
 /// Thread-safe and lock-free.
 pub const page_allocator = if (builtin.target.isWasm())
-blk: {
-    // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
-    // allowing the use of `*void` but it would still be ugly
-    var tmp: u1 = 0;
-    break :blk Allocator.init(&tmp, WasmPageAllocator.alloc, WasmPageAllocator.resize);
-} else if (builtin.target.os.tag == .freestanding)
+    Allocator{
+        .ptr = undefined,
+        .allocFn = WasmPageAllocator.alloc,
+        .resizeFn = WasmPageAllocator.resize,
+    }
+else if (builtin.target.os.tag == .freestanding)
     root.os.heap.page_allocator
-else blk: {
-    // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
-    // allowing the use of `*void` but it would still be ugly
-    var tmp: u1 = 0;
-    break :blk Allocator.init(&tmp, PageAllocator.alloc, PageAllocator.resize);
-};
+else
+    Allocator{
+        .ptr = undefined,
+        .allocFn = PageAllocator.alloc,
+        .resizeFn = PageAllocator.resize,
+    };
 
 /// Verifies that the adjusted length will still map to the full length
 pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
@@ -233,7 +231,7 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
 pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
 
 const PageAllocator = struct {
-    fn alloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
+    fn alloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
         _ = ra;
         assert(n > 0);
         const aligned_len = mem.alignForward(n, mem.page_size);
@@ -331,7 +329,7 @@ const PageAllocator = struct {
     }
 
     fn resize(
-        _: *u1,
+        _: *c_void,
         buf_unaligned: []u8,
         buf_align: u29,
         new_size: usize,
@@ -487,7 +485,7 @@ const WasmPageAllocator = struct {
         return mem.alignForward(memsize, mem.page_size) / mem.page_size;
     }
 
-    fn alloc(_: *u1, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
+    fn alloc(_: *c_void, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
         _ = ra;
         const page_count = nPages(len);
         const page_idx = try allocPages(page_count, alignment);
@@ -542,7 +540,7 @@ const WasmPageAllocator = struct {
     }
 
     fn resize(
-        _: *u1,
+        _: *c_void,
         buf: []u8,
         buf_align: u29,
         new_len: usize,
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index b5dc50191178..4b43069d1e79 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -131,14 +131,13 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
     return adjusted;
 }
 
-const failAllocator = blk: {
-    // TODO: This is an ugly hack, it could be improved once https://github.com/ziglang/zig/issues/6706 is implemented
-    // allowing the use of `*void` but it would still be ugly
-    var tmp: u1 = 0;
-    break :blk Allocator.init(&tmp, failAllocatorAlloc, Allocator.NoResize(u1).noResize);
+const failAllocator = Allocator{
+    .ptr = undefined,
+    .allocFn = failAllocatorAlloc,
+    .resizeFn = Allocator.NoResize(c_void).noResize,
 };
 
-fn failAllocatorAlloc(_: *u1, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
+fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
     _ = n;
     _ = alignment;
     _ = len_align;

From 75548b50ff23a3de48d166170425001c073d27c1 Mon Sep 17 00:00:00 2001
From: Lee Cannon 
Date: Fri, 29 Oct 2021 02:03:15 +0100
Subject: [PATCH 03/10] allocgate: stage 1 and 2 building

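This commit migrates std and the self-hosted compiler to the new convention: call sites stop taking the address of an embedded `allocator` field and instead ask the concrete allocator for its interface value. A minimal sketch of the call-site change, using `ArenaAllocator` as in the langref diff below:

    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();

    // Before allocgate: const allocator = &arena.allocator;
    // After: the arena constructs and hands out the value-type interface.
    const allocator = arena.getAllocator();

    const ptr = try allocator.create(i32);
    defer allocator.destroy(ptr);

Where a hot loop needs the arena repeatedly (see `blockExprStmts` in src/AstGen.zig below), the interface value is hoisted into a local (`block_arena_allocator`) rather than rebuilt at each use.
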
---
 ci/srht/update-download-page.zig         |   2 +-
 doc/docgen.zig                           |   2 +-
 doc/langref.html.in                      |  26 ++---
 lib/std/event/loop.zig                   |   4 +-
 src/AstGen.zig                           |  15 +--
 src/Compilation.zig                      |  51 ++++-----
 src/DepTokenizer.zig                     |   2 +-
 src/Module.zig                           |  61 ++++++-----
 src/Sema.zig                             | 126 ++++++++++++-----------
 src/codegen/c.zig                        |   3 +-
 src/codegen/llvm.zig                     |  10 +-
 src/crash_report.zig                     |   2 +-
 src/glibc.zig                            |   6 +-
 src/libcxx.zig                           |   4 +-
 src/libtsan.zig                          |   2 +-
 src/libunwind.zig                        |   2 +-
 src/link.zig                             |   2 +-
 src/link/C.zig                           |   4 +-
 src/link/Coff.zig                        |   2 +-
 src/link/Elf.zig                         |   2 +-
 src/link/MachO.zig                       |   4 +-
 src/link/Plan9.zig                       |   2 +-
 src/link/Wasm.zig                        |   2 +-
 src/link/tapi.zig                        |   6 +-
 src/link/tapi/yaml.zig                   |  13 +--
 src/main.zig                             |   8 +-
 src/mingw.zig                            |   4 +-
 src/musl.zig                             |   2 +-
 src/print_air.zig                        |   2 +-
 src/print_zir.zig                        |   6 +-
 src/stage1.zig                           |   2 +-
 src/test.zig                             |   2 +-
 src/translate_c.zig                      |   5 +-
 src/wasi_libc.zig                        |   2 +-
 test/cli.zig                             |   2 +-
 test/compare_output.zig                  |   2 +-
 test/compile_errors.zig                  |   8 +-
 test/standalone/brace_expansion/main.zig |   2 +-
 test/standalone/cat/main.zig             |   2 +-
 tools/gen_spirv_spec.zig                 |   2 +-
 tools/gen_stubs.zig                      |   2 +-
 tools/merge_anal_dumps.zig               |   2 +-
 tools/process_headers.zig                |   2 +-
 tools/update-license-headers.zig         |   2 +-
 tools/update_clang_options.zig           |   5 +
 tools/update_cpu_features.zig            |   4 +-
 tools/update_glibc.zig                   |   2 +-
 tools/update_spirv_features.zig          |   2 +-
 48 files changed, 227 insertions(+), 200 deletions(-)

diff --git a/ci/srht/update-download-page.zig b/ci/srht/update-download-page.zig
index 175cf0abaf66..b16a8609a80d 100644
--- a/ci/srht/update-download-page.zig
+++ b/ci/srht/update-download-page.zig
@@ -6,7 +6,7 @@ pub fn main() !void {
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
 
-    const allocator = &arena.allocator;
+    const allocator = arena.getAllocator();
 
     const out_dir = "out";
     try std.fs.cwd().makePath(out_dir);
diff --git a/doc/docgen.zig b/doc/docgen.zig
index ed469caf9ee7..1f6ff7461789 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -21,7 +21,7 @@ pub fn main() !void {
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
 
-    const allocator = &arena.allocator;
+    const allocator = arena.getAllocator();
 
     var args_it = process.args();
 
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 89a9b16a2e22..631c9486286a 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -7362,7 +7362,7 @@ fn amain() !void {
 }
 
 var global_download_frame: anyframe = undefined;
-fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 {
+fn fetchUrl(allocator: Allocator, url: []const u8) ![]u8 {
     _ = url; // this is just an example, we don't actually do it!
     const result = try allocator.dupe(u8, "this is the downloaded url contents");
     errdefer allocator.free(result);
@@ -7374,7 +7374,7 @@ fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 {
 }
 
 var global_file_frame: anyframe = undefined;
-fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 {
+fn readFile(allocator: Allocator, filename: []const u8) ![]u8 {
     _ = filename; // this is just an example, we don't actually do it!
     const result = try allocator.dupe(u8, "this is the file contents");
     errdefer allocator.free(result);
@@ -7433,7 +7433,7 @@ fn amain() !void {
     std.debug.print("file_text: {s}\n", .{file_text});
 }
 
-fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 {
+fn fetchUrl(allocator: Allocator, url: []const u8) ![]u8 {
     _ = url; // this is just an example, we don't actually do it!
     const result = try allocator.dupe(u8, "this is the downloaded url contents");
     errdefer allocator.free(result);
@@ -7441,7 +7441,7 @@ fn fetchUrl(allocator: *Allocator, url: []const u8) ![]u8 {
     return result;
 }
 
-fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 {
+fn readFile(allocator: Allocator, filename: []const u8) ![]u8 {
     _ = filename; // this is just an example, we don't actually do it!
     const result = try allocator.dupe(u8, "this is the file contents");
     errdefer allocator.free(result);
@@ -10050,8 +10050,8 @@ pub fn main() void {
       C has a default allocator - malloc, realloc, and free.
       When linking against libc, Zig exposes this allocator with {#syntax#}std.heap.c_allocator{#endsyntax#}.
       However, by convention, there is no default allocator in Zig. Instead, functions which need to
-      allocate accept an {#syntax#}*Allocator{#endsyntax#} parameter. Likewise, data structures such as
-      {#syntax#}std.ArrayList{#endsyntax#} accept an {#syntax#}*Allocator{#endsyntax#} parameter in
+      allocate accept an {#syntax#}Allocator{#endsyntax#} parameter. Likewise, data structures such as
+      {#syntax#}std.ArrayList{#endsyntax#} accept an {#syntax#}Allocator{#endsyntax#} parameter in
       their initialization functions:
       

 {#code_begin|test|allocator#}
@@ -10061,12 +10061,13 @@ const expect = std.testing.expect;
 
 test "using an allocator" {
     var buffer: [100]u8 = undefined;
-    const allocator = &std.heap.FixedBufferAllocator.init(&buffer).allocator;
+    var fba = std.heap.FixedBufferAllocator.init(&buffer);
+    const allocator = fba.getAllocator();
     const result = try concat(allocator, "foo", "bar");
     try expect(std.mem.eql(u8, "foobar", result));
 }
 
-fn concat(allocator: *Allocator, a: []const u8, b: []const u8) ![]u8 {
+fn concat(allocator: Allocator, a: []const u8, b: []const u8) ![]u8 {
     const result = try allocator.alloc(u8, a.len + b.len);
     std.mem.copy(u8, result, a);
     std.mem.copy(u8, result[a.len..], b);
@@ -10091,7 +10091,7 @@ fn concat(allocator: *Allocator, a: []const u8, b: []const u8) ![]u8 {
       <li>
-        Are you making a library? In this case, best to accept an {#syntax#}*Allocator{#endsyntax#}
+        Are you making a library? In this case, best to accept an {#syntax#}Allocator{#endsyntax#}
         as a parameter and allow your library's users to decide what allocator to use.
       </li>
       <li>
         Are you linking libc? In this case, {#syntax#}std.heap.c_allocator{#endsyntax#} is likely
@@ -10114,7 +10114,7 @@ pub fn main() !void {
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
 
-    const allocator = &arena.allocator;
+    const allocator = arena.getAllocator();
 
     const ptr = try allocator.create(i32);
     std.debug.print("ptr={*}\n", .{ptr});
@@ -10281,7 +10281,7 @@ test "string literal to constant slice" {
       For example, the function's documentation may say "caller owns the returned memory",
       in which case the code that calls the function must have a plan for when to free that
       memory. Probably in this situation,
-      the function will accept an {#syntax#}*Allocator{#endsyntax#} parameter.
+      the function will accept an {#syntax#}Allocator{#endsyntax#} parameter.
 
       Sometimes the lifetime of a pointer may be more complicated. For example, the
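[One wrinkle in the langref test above: getAllocator takes a pointer to the concrete allocator state, so it cannot be called on a temporary, and the old one-line FixedBufferAllocator idiom has to become two lines. A minimal sketch of the working shape under the new interface:

    const std = @import("std");

    test "fixed buffer allocation" {
        var buffer: [100]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buffer);
        const allocator = fba.getAllocator();

        // All allocations come out of `buffer`; nothing touches the heap.
        const slice = try allocator.alloc(u8, 10);
        _ = slice;
    }
]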

    Sometimes the lifetime of a pointer may be more complicated. For example, the @@ -10820,7 +10820,7 @@ const std = @import("std"); pub fn main() !void { var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; - const gpa = &general_purpose_allocator.allocator; + const gpa = general_purpose_allocator.getAllocator(); const args = try std.process.argsAlloc(gpa); defer std.process.argsFree(gpa, args); @@ -10842,7 +10842,7 @@ const PreopenList = std.fs.wasi.PreopenList; pub fn main() !void { var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; - const gpa = &general_purpose_allocator.allocator; + const gpa = general_purpose_allocator.getAllocator(); var preopens = PreopenList.init(gpa); defer preopens.deinit(); diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index 413b23cd48af..191ed4de94df 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -173,12 +173,12 @@ pub const Loop = struct { // We need at least one of these in case the fs thread wants to use onNextTick const extra_thread_count = thread_count - 1; const resume_node_count = std.math.max(extra_thread_count, 1); - self.eventfd_resume_nodes = try self.arena.allocator.alloc( + self.eventfd_resume_nodes = try self.arena.getAllocator().alloc( std.atomic.Stack(ResumeNode.EventFd).Node, resume_node_count, ); - self.extra_threads = try self.arena.allocator.alloc(Thread, extra_thread_count); + self.extra_threads = try self.arena.getAllocator().alloc(Thread, extra_thread_count); try self.initOsData(extra_thread_count); errdefer self.deinitOsData(); diff --git a/src/AstGen.zig b/src/AstGen.zig index 5cc7f8ef6581..2e58c8c5d9a4 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -98,7 +98,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { var astgen: AstGen = .{ .gpa = gpa, - .arena = &arena.allocator, + .arena = arena.getAllocator(), .tree = &tree, }; defer astgen.deinit(gpa); @@ -1939,6 +1939,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa); defer block_arena.deinit(); + const block_arena_allocator = block_arena.getAllocator(); var noreturn_src_node: Ast.Node.Index = 0; var scope = parent_scope; @@ -1959,13 +1960,13 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod } switch (node_tags[statement]) { // zig fmt: off - .global_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.globalVarDecl(statement)), - .local_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.localVarDecl(statement)), - .simple_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.simpleVarDecl(statement)), - .aligned_var_decl => scope = try varDecl(gz, scope, statement, &block_arena.allocator, tree.alignedVarDecl(statement)), + .global_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.globalVarDecl(statement)), + .local_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.localVarDecl(statement)), + .simple_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.simpleVarDecl(statement)), + .aligned_var_decl => scope = try varDecl(gz, scope, statement, block_arena_allocator, tree.alignedVarDecl(statement)), - .@"defer" => scope = try makeDeferScope(gz.astgen, scope, statement, &block_arena.allocator, .defer_normal), - .@"errdefer" => scope = try makeDeferScope(gz.astgen, scope, 
statement, &block_arena.allocator, .defer_error), + .@"defer" => scope = try makeDeferScope(gz.astgen, scope, statement, block_arena_allocator, .defer_normal), + .@"errdefer" => scope = try makeDeferScope(gz.astgen, scope, statement, block_arena_allocator, .defer_error), .assign => try assign(gz, scope, statement), diff --git a/src/Compilation.zig b/src/Compilation.zig index f6ee58b5efef..7c855862fd56 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -412,28 +412,29 @@ pub const AllErrors = struct { errors: *std.ArrayList(Message), module_err_msg: Module.ErrorMsg, ) !void { - const notes = try arena.allocator.alloc(Message, module_err_msg.notes.len); + const allocator = arena.getAllocator(); + const notes = try allocator.alloc(Message, module_err_msg.notes.len); for (notes) |*note, i| { const module_note = module_err_msg.notes[i]; const source = try module_note.src_loc.file_scope.getSource(module.gpa); const byte_offset = try module_note.src_loc.byteOffset(module.gpa); const loc = std.zig.findLineColumn(source, byte_offset); - const file_path = try module_note.src_loc.file_scope.fullPath(&arena.allocator); + const file_path = try module_note.src_loc.file_scope.fullPath(allocator); note.* = .{ .src = .{ .src_path = file_path, - .msg = try arena.allocator.dupe(u8, module_note.msg), + .msg = try allocator.dupe(u8, module_note.msg), .byte_offset = byte_offset, .line = @intCast(u32, loc.line), .column = @intCast(u32, loc.column), - .source_line = try arena.allocator.dupe(u8, loc.source_line), + .source_line = try allocator.dupe(u8, loc.source_line), }, }; } if (module_err_msg.src_loc.lazy == .entire_file) { try errors.append(.{ .plain = .{ - .msg = try arena.allocator.dupe(u8, module_err_msg.msg), + .msg = try allocator.dupe(u8, module_err_msg.msg), }, }); return; @@ -441,16 +442,16 @@ pub const AllErrors = struct { const source = try module_err_msg.src_loc.file_scope.getSource(module.gpa); const byte_offset = try module_err_msg.src_loc.byteOffset(module.gpa); const loc = std.zig.findLineColumn(source, byte_offset); - const file_path = try module_err_msg.src_loc.file_scope.fullPath(&arena.allocator); + const file_path = try module_err_msg.src_loc.file_scope.fullPath(allocator); try errors.append(.{ .src = .{ .src_path = file_path, - .msg = try arena.allocator.dupe(u8, module_err_msg.msg), + .msg = try allocator.dupe(u8, module_err_msg.msg), .byte_offset = byte_offset, .line = @intCast(u32, loc.line), .column = @intCast(u32, loc.column), .notes = notes, - .source_line = try arena.allocator.dupe(u8, loc.source_line), + .source_line = try allocator.dupe(u8, loc.source_line), }, }); } @@ -548,11 +549,12 @@ pub const AllErrors = struct { msg: []const u8, optional_children: ?AllErrors, ) !void { - const duped_msg = try arena.allocator.dupe(u8, msg); + const allocator = arena.getAllocator(); + const duped_msg = try allocator.dupe(u8, msg); if (optional_children) |*children| { try errors.append(.{ .plain = .{ .msg = duped_msg, - .notes = try dupeList(children.list, &arena.allocator), + .notes = try dupeList(children.list, allocator), } }); } else { try errors.append(.{ .plain = .{ .msg = duped_msg } }); @@ -786,7 +788,7 @@ fn addPackageTableToCacheHash( seen_table: *std.AutoHashMap(*Package, void), hash_type: union(enum) { path_bytes, files: *Cache.Manifest }, ) (error{OutOfMemory} || std.os.GetCwdError)!void { - const allocator = &arena.allocator; + const allocator = arena.getAllocator(); const packages = try allocator.alloc(Package.Table.KV, pkg_table.count()); { @@ -850,7 +852,7 @@ pub 
fn create(gpa: Allocator, options: InitOptions) !*Compilation { // initialization and then is freed in deinit(). var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); // We put the `Compilation` itself in the arena. Freeing the arena will free the module. // It's initialized later after we prepare the initialization options. @@ -1208,7 +1210,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { { var local_arena = std.heap.ArenaAllocator.init(gpa); defer local_arena.deinit(); - var seen_table = std.AutoHashMap(*Package, void).init(&local_arena.allocator); + var seen_table = std.AutoHashMap(*Package, void).init(local_arena.getAllocator()); try addPackageTableToCacheHash(&hash, &local_arena, main_pkg.table, &seen_table, .path_bytes); } hash.add(valgrind); @@ -2011,6 +2013,7 @@ pub fn totalErrorCount(self: *Compilation) usize { pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { var arena = std.heap.ArenaAllocator.init(self.gpa); errdefer arena.deinit(); + const arena_allocator = arena.getAllocator(); var errors = std.ArrayList(AllErrors.Message).init(self.gpa); defer errors.deinit(); @@ -2024,8 +2027,8 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { // C error reporting bubbling up. try errors.append(.{ .src = .{ - .src_path = try arena.allocator.dupe(u8, c_object.src.src_path), - .msg = try std.fmt.allocPrint(&arena.allocator, "unable to build C object: {s}", .{ + .src_path = try arena_allocator.dupe(u8, c_object.src.src_path), + .msg = try std.fmt.allocPrint(arena_allocator, "unable to build C object: {s}", .{ err_msg.msg, }), .byte_offset = 0, @@ -2050,7 +2053,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { // must have completed successfully. 
const tree = try entry.key_ptr.*.getTree(module.gpa); assert(tree.errors.len == 0); - try AllErrors.addZir(&arena.allocator, &errors, entry.key_ptr.*); + try AllErrors.addZir(arena_allocator, &errors, entry.key_ptr.*); } } } @@ -2089,7 +2092,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) { try errors.append(.{ .plain = .{ - .msg = try std.fmt.allocPrint(&arena.allocator, "no entry point found", .{}), + .msg = try std.fmt.allocPrint(arena_allocator, "no entry point found", .{}), }, }); } @@ -2121,7 +2124,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { assert(errors.items.len == self.totalErrorCount()); return AllErrors{ - .list = try arena.allocator.dupe(AllErrors.Message, errors.items), + .list = try arena_allocator.dupe(AllErrors.Message, errors.items), .arena = arena.state, }; } @@ -2292,7 +2295,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress var tmp_arena = std.heap.ArenaAllocator.init(gpa); defer tmp_arena.deinit(); - const sema_arena = &tmp_arena.allocator; + const sema_arena = tmp_arena.getAllocator(); const sema_frame = tracy.namedFrame("sema"); var sema_frame_ended = false; @@ -2387,7 +2390,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress .decl = decl, .fwd_decl = fwd_decl.toManaged(gpa), .typedefs = c_codegen.TypedefMap.init(gpa), - .typedefs_arena = &typedefs_arena.allocator, + .typedefs_arena = typedefs_arena.getAllocator(), }; defer dg.fwd_decl.deinit(); defer dg.typedefs.deinit(); @@ -2841,7 +2844,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult { const digest = if (!actual_hit) digest: { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const tmp_digest = man.hash.peek(); const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest }); @@ -3096,7 +3099,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const c_source_basename = std.fs.path.basename(c_object.src.src_path); @@ -4417,7 +4420,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); // Here we use the legacy stage1 C++ compiler to compile Zig code. 
const mod = comp.bin_file.options.module.?; @@ -4454,7 +4457,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node _ = try man.addFile(main_zig_file, null); { - var seen_table = std.AutoHashMap(*Package, void).init(&arena_allocator.allocator); + var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.getAllocator()); try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = &man }); } man.hash.add(comp.bin_file.options.valgrind); diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig index 0fd26532f0b9..e99bfc746471 100644 --- a/src/DepTokenizer.zig +++ b/src/DepTokenizer.zig @@ -878,7 +878,7 @@ test "error prereq - continuation expecting end-of-line" { // - tokenize input, emit textual representation, and compare to expect fn depTokenizer(input: []const u8, expect: []const u8) !void { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); defer arena_allocator.deinit(); var it: Tokenizer = .{ .bytes = input }; diff --git a/src/Module.zig b/src/Module.zig index a40dcd14198e..d016418d8d8e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -517,7 +517,7 @@ pub const Decl = struct { pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void { assert(decl.value_arena == null); - const arena_state = try arena.allocator.create(std.heap.ArenaAllocator.State); + const arena_state = try arena.getAllocator().create(std.heap.ArenaAllocator.State); arena_state.* = arena.state; decl.value_arena = arena_state; } @@ -3159,10 +3159,11 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { const gpa = mod.gpa; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const struct_obj = try new_decl_arena.allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty); + const struct_obj = try new_decl_arena_allocator.create(Module.Struct); + const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); + const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); const ty_ty = comptime Type.initTag(.type); struct_obj.* = .{ .owner_decl = undefined, // set below @@ -3202,12 +3203,13 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { var sema_arena = std.heap.ArenaAllocator.init(gpa); defer sema_arena.deinit(); + const sema_arena_allocator = sema_arena.getAllocator(); var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = &sema_arena.allocator, - .perm_arena = &new_decl_arena.allocator, + .arena = sema_arena_allocator, + .perm_arena = new_decl_arena_allocator, .code = file.zir, .owner_decl = new_decl, .func = null, @@ -3216,7 +3218,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, &new_decl_arena.allocator, null); + var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null); defer wip_captures.deinit(); var block_scope: Sema.Block = .{ @@ -3265,15 +3267,17 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { // We need the memory for the Type to go into the arena for the Decl var decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer decl_arena.deinit(); + const 
decl_arena_allocator = decl_arena.getAllocator(); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + const analysis_arena_allocator = analysis_arena.getAllocator(); var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = &analysis_arena.allocator, - .perm_arena = &decl_arena.allocator, + .arena = analysis_arena_allocator, + .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .func = null, @@ -3296,7 +3300,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { } log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); - var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); defer wip_captures.deinit(); var block_scope: Sema.Block = .{ @@ -3356,7 +3360,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { // not the struct itself. try sema.resolveTypeLayout(&block_scope, src, decl_tv.ty); - const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); + const decl_arena_state = try decl_arena_allocator.create(std.heap.ArenaAllocator.State); if (decl.is_usingnamespace) { const ty_ty = Type.initTag(.type); @@ -3370,7 +3374,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { } decl.ty = ty_ty; - decl.val = try Value.Tag.ty.create(&decl_arena.allocator, ty); + decl.val = try Value.Tag.ty.create(decl_arena_allocator, ty); decl.align_val = Value.initTag(.null_value); decl.linksection_val = Value.initTag(.null_value); decl.has_tv = true; @@ -3400,10 +3404,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { decl.clearValues(gpa); } - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.ty = try decl_tv.ty.copy(decl_arena_allocator); + decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.align_val = try align_val.copy(decl_arena_allocator); + decl.linksection_val = try linksection_val.copy(decl_arena_allocator); decl.@"addrspace" = address_space; decl.has_tv = true; decl.owns_tv = owns_tv; @@ -3453,7 +3457,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { decl.owns_tv = true; queue_linker_work = true; - const copied_init = try variable.init.copy(&decl_arena.allocator); + const copied_init = try variable.init.copy(decl_arena_allocator); variable.init = copied_init; } }, @@ -3476,10 +3480,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { }, } - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.ty = try decl_tv.ty.copy(decl_arena_allocator); + decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.align_val = try align_val.copy(decl_arena_allocator); + decl.linksection_val = try linksection_val.copy(decl_arena_allocator); decl.@"addrspace" = address_space; decl.has_tv = true; decl_arena_state.* = decl_arena.state; @@ -4128,12 +4132,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem // Use the Decl's arena for captured values. 
var decl_arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = decl_arena.state; + const decl_arena_allocator = decl_arena.getAllocator(); var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = arena, - .perm_arena = &decl_arena.allocator, + .perm_arena = decl_arena_allocator, .code = decl.getFileScope().zir, .owner_decl = decl, .func = func, @@ -4147,7 +4152,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); sema.air_extra.items.len += reserved_count; - var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); defer wip_captures.deinit(); var inner_block: Sema.Block = .{ @@ -4751,7 +4756,7 @@ pub fn populateTestFunctions(mod: *Module) !void { // decl reference it as a slice. var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const arena = &new_decl_arena.allocator; + const arena = new_decl_arena.getAllocator(); const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); const array_decl = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ @@ -4770,10 +4775,10 @@ pub fn populateTestFunctions(mod: *Module) !void { const test_name_decl = n: { var name_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer name_decl_arena.deinit(); - const bytes = try name_decl_arena.allocator.dupe(u8, test_name_slice); + const bytes = try arena.dupe(u8, test_name_slice); const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.Tag.array_u8.create(&name_decl_arena.allocator, bytes.len), - .val = try Value.Tag.bytes.create(&name_decl_arena.allocator, bytes), + .ty = try Type.Tag.array_u8.create(arena, bytes.len), + .val = try Value.Tag.bytes.create(arena, bytes), }); try test_name_decl.finalizeNewArena(&name_decl_arena); break :n test_name_decl; @@ -4802,7 +4807,7 @@ pub fn populateTestFunctions(mod: *Module) !void { { var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const arena = &new_decl_arena.allocator; + const arena = new_decl_arena.getAllocator(); // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. 
const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); diff --git a/src/Sema.zig b/src/Sema.zig index ce0c5c8ed75c..9e0aa2f75edb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -418,7 +418,7 @@ pub const Block = struct { finished: bool, pub fn arena(wad: *WipAnonDecl) Allocator { - return &wad.new_decl_arena.allocator; + return wad.new_decl_arena.getAllocator(); } pub fn deinit(wad: *WipAnonDecl) void { @@ -1594,10 +1594,11 @@ fn zirStructDecl( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const struct_obj = try new_decl_arena.allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty); + const struct_obj = try new_decl_arena_allocator.create(Module.Struct); + const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); + const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, @@ -1698,15 +1699,16 @@ fn zirEnumDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const enum_obj = try new_decl_arena.allocator.create(Module.EnumFull); - const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumFull); + const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull); + const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); enum_ty_payload.* = .{ .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full }, .data = enum_obj, }; const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); + const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, @@ -1790,17 +1792,17 @@ fn zirEnumDecl( break :blk try sema.resolveType(block, src, tag_type_ref); } const bits = std.math.log2_int_ceil(usize, fields_len); - break :blk try Type.Tag.int_unsigned.create(&new_decl_arena.allocator, bits); + break :blk try Type.Tag.int_unsigned.create(new_decl_arena_allocator, bits); }; enum_obj.tag_ty = tag_ty; } - try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); + try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { if (bag != 0) break true; } else false; if (any_values) { - try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ + try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ .ty = enum_obj.tag_ty, }); } @@ -1820,7 +1822,7 @@ fn zirEnumDecl( extra_index += 1; // This string needs to outlive the ZIR code. 
- const field_name = try new_decl_arena.allocator.dupe(u8, field_name_zir); + const field_name = try new_decl_arena_allocator.dupe(u8, field_name_zir); const gop = enum_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { @@ -1843,12 +1845,12 @@ fn zirEnumDecl( // that points to this default value expression rather than the struct. // But only resolve the source location if we need to emit a compile error. const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val; - const copied_tag_val = try tag_val.copy(&new_decl_arena.allocator); + const copied_tag_val = try tag_val.copy(new_decl_arena_allocator); enum_obj.values.putAssumeCapacityNoClobberContext(copied_tag_val, {}, .{ .ty = enum_obj.tag_ty, }); } else if (any_values) { - const tag_val = try Value.Tag.int_u64.create(&new_decl_arena.allocator, field_i); + const tag_val = try Value.Tag.int_u64.create(new_decl_arena_allocator, field_i); enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = enum_obj.tag_ty }); } } @@ -1887,16 +1889,17 @@ fn zirUnionDecl( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const union_obj = try new_decl_arena.allocator.create(Module.Union); + const union_obj = try new_decl_arena_allocator.create(Module.Union); const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union"; - const union_payload = try new_decl_arena.allocator.create(Type.Payload.Union); + const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union); union_payload.* = .{ .base = .{ .tag = type_tag }, .data = union_obj, }; const union_ty = Type.initPayload(&union_payload.base); - const union_val = try Value.Tag.ty.create(&new_decl_arena.allocator, union_ty); + const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, @@ -1955,15 +1958,16 @@ fn zirOpaqueDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const opaque_obj = try new_decl_arena.allocator.create(Module.Opaque); - const opaque_ty_payload = try new_decl_arena.allocator.create(Type.Payload.Opaque); + const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); + const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); opaque_ty_payload.* = .{ .base = .{ .tag = .@"opaque" }, .data = opaque_obj, }; const opaque_ty = Type.initPayload(&opaque_ty_payload.base); - const opaque_val = try Value.Tag.ty.create(&new_decl_arena.allocator, opaque_ty); + const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, @@ -2008,10 +2012,11 @@ fn zirErrorSetDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const error_set = try new_decl_arena.allocator.create(Module.ErrorSet); - const error_set_ty = try Type.Tag.error_set.create(&new_decl_arena.allocator, error_set); - const error_set_val = try Value.Tag.ty.create(&new_decl_arena.allocator, error_set_ty); + const error_set = try 
new_decl_arena_allocator.create(Module.ErrorSet); + const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); + const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty); const type_name = try sema.createTypeName(block, name_strategy); const new_decl = try sema.mod.createAnonymousDeclNamed(block, .{ .ty = Type.type, @@ -2019,9 +2024,9 @@ fn zirErrorSetDecl( }, type_name); new_decl.owns_tv = true; errdefer sema.mod.abortAnonDecl(new_decl); - const names = try new_decl_arena.allocator.alloc([]const u8, fields.len); + const names = try new_decl_arena_allocator.alloc([]const u8, fields.len); for (fields) |str_index, i| { - names[i] = try new_decl_arena.allocator.dupe(u8, sema.code.nullTerminatedString(str_index)); + names[i] = try new_decl_arena_allocator.dupe(u8, sema.code.nullTerminatedString(str_index)); } error_set.* = .{ .owner_decl = new_decl, @@ -3935,7 +3940,7 @@ fn analyzeCall( { var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); for (memoized_call_key.args) |*arg| { arg.* = try arg.*.copy(arena); @@ -4069,6 +4074,7 @@ fn analyzeCall( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); // Re-run the block that creates the function, with the comptime parameters // pre-populated inside `inst_map`. This causes `param_comptime` and @@ -4078,13 +4084,13 @@ fn analyzeCall( .mod = mod, .gpa = gpa, .arena = sema.arena, - .perm_arena = &new_decl_arena.allocator, + .perm_arena = new_decl_arena_allocator, .code = fn_zir, .owner_decl = new_decl, .func = null, .fn_ret_ty = Type.void, .owner_func = null, - .comptime_args = try new_decl_arena.allocator.alloc(TypedValue, uncasted_args.len), + .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), .comptime_args_fn_inst = module_fn.zir_body_inst, .preallocated_new_func = new_module_func, }; @@ -4168,7 +4174,7 @@ fn analyzeCall( else => continue, } const arg = child_sema.inst_map.get(inst).?; - const copied_arg_ty = try child_sema.typeOf(arg).copy(&new_decl_arena.allocator); + const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator); if (child_sema.resolveMaybeUndefValAllowVariables( &child_block, .unneeded, @@ -4176,7 +4182,7 @@ fn analyzeCall( ) catch unreachable) |arg_val| { child_sema.comptime_args[arg_i] = .{ .ty = copied_arg_ty, - .val = try arg_val.copy(&new_decl_arena.allocator), + .val = try arg_val.copy(new_decl_arena_allocator), }; } else { child_sema.comptime_args[arg_i] = .{ @@ -4191,8 +4197,8 @@ fn analyzeCall( try wip_captures.finalize(); // Populate the Decl ty/val with the function and its type. 
- new_decl.ty = try child_sema.typeOf(new_func_inst).copy(&new_decl_arena.allocator); - new_decl.val = try Value.Tag.function.create(&new_decl_arena.allocator, new_func); + new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); + new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func); new_decl.analysis = .complete; log.debug("generic function '{s}' instantiated with type {}", .{ @@ -6047,8 +6053,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer arena.deinit(); const target = sema.mod.getTarget(); - const min_int = try operand_ty.minInt(&arena.allocator, target); - const max_int = try operand_ty.maxInt(&arena.allocator, target); + const min_int = try operand_ty.minInt(arena.getAllocator(), target); + const max_int = try operand_ty.maxInt(arena.getAllocator(), target); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -12795,7 +12801,7 @@ const ComptimePtrMutationKit = struct { fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator { self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa); - return &self.decl_arena.allocator; + return self.decl_arena.getAllocator(); } fn finishArena(self: *ComptimePtrMutationKit) void { @@ -14287,6 +14293,7 @@ fn semaStructFields( var decl_arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = decl_arena.state; + const decl_arena_allocator = decl_arena.getAllocator(); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -14294,8 +14301,8 @@ fn semaStructFields( var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = &analysis_arena.allocator, - .perm_arena = &decl_arena.allocator, + .arena = analysis_arena.getAllocator(), + .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .func = null, @@ -14304,7 +14311,7 @@ fn semaStructFields( }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ @@ -14328,7 +14335,7 @@ fn semaStructFields( try wip_captures.finalize(); - try struct_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len); + try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; @@ -14359,7 +14366,7 @@ fn semaStructFields( extra_index += 1; // This string needs to outlive the ZIR code. - const field_name = try decl_arena.allocator.dupe(u8, field_name_zir); + const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); const field_ty: Type = if (field_type_ref == .none) Type.initTag(.noreturn) else @@ -14371,7 +14378,7 @@ fn semaStructFields( const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); assert(!gop.found_existing); gop.value_ptr.* = .{ - .ty = try field_ty.copy(&decl_arena.allocator), + .ty = try field_ty.copy(decl_arena_allocator), .abi_align = Value.initTag(.abi_align_default), .default_val = Value.initTag(.unreachable_value), .is_comptime = is_comptime, @@ -14385,7 +14392,7 @@ fn semaStructFields( // that points to this alignment expression rather than the struct. // But only resolve the source location if we need to emit a compile error. 
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val; - gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator); + gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator); } if (has_default) { const default_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); @@ -14396,7 +14403,7 @@ fn semaStructFields( // But only resolve the source location if we need to emit a compile error. const default_val = (try sema.resolveMaybeUndefVal(&block_scope, src, default_inst)) orelse return sema.failWithNeededComptime(&block_scope, src); - gop.value_ptr.default_val = try default_val.copy(&decl_arena.allocator); + gop.value_ptr.default_val = try default_val.copy(decl_arena_allocator); } } } @@ -14454,6 +14461,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa); defer union_obj.owner_decl.value_arena.?.* = decl_arena.state; + const decl_arena_allocator = decl_arena.getAllocator(); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -14461,8 +14469,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = &analysis_arena.allocator, - .perm_arena = &decl_arena.allocator, + .arena = analysis_arena.getAllocator(), + .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .func = null, @@ -14471,7 +14479,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, &decl_arena.allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ @@ -14495,7 +14503,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { try wip_captures.finalize(); - try union_obj.fields.ensureTotalCapacity(&decl_arena.allocator, fields_len); + try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); var int_tag_ty: Type = undefined; var enum_field_names: ?*Module.EnumNumbered.NameMap = null; @@ -14571,7 +14579,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } // This string needs to outlive the ZIR code. - const field_name = try decl_arena.allocator.dupe(u8, field_name_zir); + const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); if (enum_field_names) |set| { set.putAssumeCapacity(field_name, {}); } @@ -14589,7 +14597,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); assert(!gop.found_existing); gop.value_ptr.* = .{ - .ty = try field_ty.copy(&decl_arena.allocator), + .ty = try field_ty.copy(decl_arena_allocator), .abi_align = Value.initTag(.abi_align_default), }; @@ -14598,7 +14606,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { // that points to this alignment expression rather than the struct. // But only resolve the source location if we need to emit a compile error. 
const abi_align_val = (try sema.resolveInstConst(&block_scope, src, align_ref)).val; - gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator); + gop.value_ptr.abi_align = try abi_align_val.copy(decl_arena_allocator); } else { gop.value_ptr.abi_align = Value.initTag(.abi_align_default); } @@ -14615,15 +14623,16 @@ fn generateUnionTagTypeNumbered( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const enum_obj = try new_decl_arena.allocator.create(Module.EnumNumbered); - const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumNumbered); + const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered); + const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered); enum_ty_payload.* = .{ .base = .{ .tag = .enum_numbered }, .data = enum_obj, }; const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); + const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); // TODO better type name const new_decl = try mod.createAnonymousDecl(block, .{ .ty = Type.type, @@ -14640,8 +14649,8 @@ fn generateUnionTagTypeNumbered( .node_offset = 0, }; // Here we pre-allocate the maps using the decl arena. - try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); - try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ .ty = int_ty }); + try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); + try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ .ty = int_ty }); try new_decl.finalizeNewArena(&new_decl_arena); return enum_ty; } @@ -14651,15 +14660,16 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); + const new_decl_arena_allocator = new_decl_arena.getAllocator(); - const enum_obj = try new_decl_arena.allocator.create(Module.EnumSimple); - const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumSimple); + const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple); + const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple); enum_ty_payload.* = .{ .base = .{ .tag = .enum_simple }, .data = enum_obj, }; const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); + const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); // TODO better type name const new_decl = try mod.createAnonymousDecl(block, .{ .ty = Type.type, @@ -14674,7 +14684,7 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type .node_offset = 0, }; // Here we pre-allocate the maps using the decl arena. - try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); + try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); try new_decl.finalizeNewArena(&new_decl_arena); return enum_ty; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 43776dea6723..142bf1a146fe 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -390,6 +390,7 @@ pub const DeclGen = struct { // Fall back to generic implementation. 
var arena = std.heap.ArenaAllocator.init(dg.module.gpa); defer arena.deinit(); + const arena_allocator = arena.getAllocator(); try writer.writeAll("{"); var index: usize = 0; @@ -397,7 +398,7 @@ pub const DeclGen = struct { const elem_ty = ty.elemType(); while (index < len) : (index += 1) { if (index != 0) try writer.writeAll(","); - const elem_val = try val.elemValue(&arena.allocator, index); + const elem_val = try val.elemValue(arena_allocator, index); try dg.renderValue(writer, elem_ty, elem_val); } if (ty.sentinel()) |sentinel_val| { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e326b2a677c3..31d3461846d3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -331,7 +331,7 @@ pub const Object = struct { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const mod = comp.bin_file.options.module.?; const cache_dir = mod.zig_cache_artifact_directory; @@ -779,7 +779,7 @@ pub const DeclGen = struct { // The Type memory is ephemeral; since we want to store a longer-lived // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator); + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator()); const opaque_obj = t.castTag(.@"opaque").?.data; const name = try opaque_obj.getFullyQualifiedName(gpa); @@ -837,7 +837,7 @@ pub const DeclGen = struct { // The Type memory is ephemeral; since we want to store a longer-lived // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator); + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator()); const struct_obj = t.castTag(.@"struct").?.data; @@ -871,7 +871,7 @@ pub const DeclGen = struct { // The Type memory is ephemeral; since we want to store a longer-lived // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(&dg.object.type_map_arena.allocator); + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator()); const union_obj = t.cast(Type.Payload.Union).?.data; const target = dg.module.getTarget(); @@ -2485,7 +2485,7 @@ pub const FuncGen = struct { var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const llvm_params_len = args.len; const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len); diff --git a/src/crash_report.zig b/src/crash_report.zig index f11f95fe0c8d..92c37d2ac815 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -85,7 +85,7 @@ fn dumpStatusReport() !void { const anal = zir_state orelse return; // Note: We have the panic mutex here, so we can safely use the global crash heap. 
var fba = std.heap.FixedBufferAllocator.init(&crash_heap); - const allocator = &fba.allocator; + const allocator = fba.getAllocator(); const stderr = io.getStdErr().writer(); const block: *Sema.Block = anal.block; diff --git a/src/glibc.zig b/src/glibc.zig index e6e67e4f49f0..c3f2da599eff 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -65,7 +65,7 @@ pub fn loadMetaData(gpa: Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!* var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); var all_versions = std.ArrayListUnmanaged(std.builtin.Version){}; var all_functions = std.ArrayListUnmanaged(Fn){}; @@ -256,7 +256,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { const gpa = comp.gpa; var arena_allocator = std.heap.ArenaAllocator.init(gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); switch (crt_file) { .crti_o => { @@ -711,7 +711,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const target = comp.getTarget(); const target_version = target.os.version_range.linux.glibc; diff --git a/src/libcxx.zig b/src/libcxx.zig index 9986c922ba97..908df3ca2592 100644 --- a/src/libcxx.zig +++ b/src/libcxx.zig @@ -89,7 +89,7 @@ pub fn buildLibCXX(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const root_name = "c++"; const output_mode = .Lib; @@ -236,7 +236,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const root_name = "c++abi"; const output_mode = .Lib; diff --git a/src/libtsan.zig b/src/libtsan.zig index 57f1f8c78ee7..47089cc7790f 100644 --- a/src/libtsan.zig +++ b/src/libtsan.zig @@ -15,7 +15,7 @@ pub fn buildTsan(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const root_name = "tsan"; const output_mode = .Lib; diff --git a/src/libunwind.zig b/src/libunwind.zig index 50c329c6d6d3..dabd8631b9e4 100644 --- a/src/libunwind.zig +++ b/src/libunwind.zig @@ -17,7 +17,7 @@ pub fn buildStaticLib(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const root_name = "unwind"; const output_mode = .Lib; diff --git a/src/link.zig b/src/link.zig index b57be64d4279..0b191ca8daf8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -628,7 +628,7 @@ pub const File = struct { var arena_allocator = std.heap.ArenaAllocator.init(base.allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type. 
diff --git a/src/link/C.zig b/src/link/C.zig index cbd36ebab504..6bdace3fcaf3 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -128,7 +128,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes .decl = decl, .fwd_decl = fwd_decl.toManaged(module.gpa), .typedefs = typedefs.promote(module.gpa), - .typedefs_arena = &self.arena.allocator, + .typedefs_arena = self.arena.getAllocator(), }, .code = code.toManaged(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code @@ -193,7 +193,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { .decl = decl, .fwd_decl = fwd_decl.toManaged(module.gpa), .typedefs = typedefs.promote(module.gpa), - .typedefs_arena = &self.arena.allocator, + .typedefs_arena = self.arena.getAllocator(), }, .code = code.toManaged(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code diff --git a/src/link/Coff.zig b/src/link/Coff.zig index d5e3e6caa32f..00bddfe578b8 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -877,7 +877,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 6670f1a8b667..200ca488984d 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1243,7 +1243,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. diff --git a/src/link/MachO.zig b/src/link/MachO.zig index bd26b64ad281..5e0e76648351 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -412,7 +412,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. 
@@ -5379,7 +5379,7 @@ fn snapshotState(self: *MachO) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const out_file = try emit.directory.handle.createFile("snapshots.json", .{ .truncate = self.cold_start, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index c4c42940b85f..bc7e4d71a414 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -168,7 +168,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void { try fn_map_res.value_ptr.functions.put(gpa, decl, out); } else { const file = decl.getFileScope(); - const arena = &self.path_arena.allocator; + const arena = self.path_arena.getAllocator(); // each file gets a symbol fn_map_res.value_ptr.* = .{ .sym_index = blk: { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index a8606ac27f7c..7ffd067596d6 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -950,7 +950,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. diff --git a/src/link/tapi.zig b/src/link/tapi.zig index 84257de388e3..fe5ef2af9c9f 100644 --- a/src/link/tapi.zig +++ b/src/link/tapi.zig @@ -120,7 +120,7 @@ pub const LibStub = struct { err: { log.debug("trying to parse as []TbdV4", .{}); const inner = lib_stub.yaml.parse([]TbdV4) catch break :err; - var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len); + var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, inner.len); for (inner) |doc, i| { out[i] = .{ .v4 = doc }; } @@ -130,7 +130,7 @@ pub const LibStub = struct { err: { log.debug("trying to parse as TbdV4", .{}); const inner = lib_stub.yaml.parse(TbdV4) catch break :err; - var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1); + var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1); out[0] = .{ .v4 = inner }; break :blk out; } @@ -148,7 +148,7 @@ pub const LibStub = struct { err: { log.debug("trying to parse as TbdV3", .{}); const inner = lib_stub.yaml.parse(TbdV3) catch break :err; - var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1); + var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1); out[0] = .{ .v3 = inner }; break :blk out; } diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig index 4392befb597a..261caee717f1 100644 --- a/src/link/tapi/yaml.zig +++ b/src/link/tapi/yaml.zig @@ -248,15 +248,16 @@ pub const Yaml = struct { pub fn load(allocator: Allocator, source: []const u8) !Yaml { var arena = ArenaAllocator.init(allocator); + const arena_allocator = arena.getAllocator(); - var tree = Tree.init(&arena.allocator); + var tree = Tree.init(arena_allocator); try tree.parse(source); - var docs = std.ArrayList(Value).init(&arena.allocator); + var docs = std.ArrayList(Value).init(arena_allocator); try docs.ensureUnusedCapacity(tree.docs.items.len); for (tree.docs.items) |node| { - const value = try Value.fromNode(&arena.allocator, &tree, node, null); + const value = try Value.fromNode(arena_allocator, &tree, node, null); docs.appendAssumeCapacity(value); } @@ -299,7 +300,7 @@ pub const Yaml = struct { .Pointer => |info| { switch (info.size) { .Slice => { - var parsed = try self.arena.allocator.alloc(info.child, 
self.docs.items.len); + var parsed = try self.arena.getAllocator().alloc(info.child, self.docs.items.len); for (self.docs.items) |doc, i| { parsed[i] = try self.parseValue(info.child, doc); } @@ -361,7 +362,7 @@ pub const Yaml = struct { inline for (struct_info.fields) |field| { const value: ?Value = map.get(field.name) orelse blk: { - const field_name = try mem.replaceOwned(u8, &self.arena.allocator, field.name, "_", "-"); + const field_name = try mem.replaceOwned(u8, self.arena.getAllocator(), field.name, "_", "-"); break :blk map.get(field_name); }; @@ -382,7 +383,7 @@ pub const Yaml = struct { fn parsePointer(self: *Yaml, comptime T: type, value: Value) Error!T { const ptr_info = @typeInfo(T).Pointer; - const arena = &self.arena.allocator; + const arena = self.arena.getAllocator(); switch (ptr_info.size) { .Slice => { diff --git a/src/main.zig b/src/main.zig index 52272db8ef2c..c97415ff29e5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -139,7 +139,7 @@ pub fn main() anyerror!void { const gpa = gpa: { if (!builtin.link_libc) { gpa_need_deinit = true; - break :gpa &general_purpose_allocator.allocator; + break :gpa general_purpose_allocator.getAllocator(); } // We would prefer to use raw libc allocator here, but cannot // use it if it won't support the alignment we need. @@ -153,7 +153,7 @@ pub fn main() anyerror!void { }; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); - const arena = &arena_instance.allocator; + const arena = arena_instance.getAllocator(); const args = try process.argsAlloc(arena); @@ -3619,7 +3619,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void var errors = std.ArrayList(Compilation.AllErrors.Message).init(gpa); defer errors.deinit(); - try Compilation.AllErrors.addZir(&arena_instance.allocator, &errors, &file); + try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file); const ttyconf: std.debug.TTY.Config = switch (color) { .auto => std.debug.detectTTYConfig(), .on => .escape_codes, @@ -3818,7 +3818,7 @@ fn fmtPathFile( var errors = std.ArrayList(Compilation.AllErrors.Message).init(fmt.gpa); defer errors.deinit(); - try Compilation.AllErrors.addZir(&arena_instance.allocator, &errors, &file); + try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file); const ttyconf: std.debug.TTY.Config = switch (fmt.color) { .auto => std.debug.detectTTYConfig(), .on => .escape_codes, diff --git a/src/mingw.zig b/src/mingw.zig index b2628553b9bf..6f02ebf39525 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { } var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); switch (crt_file) { .crt2_o => { @@ -281,7 +281,7 @@ fn add_cc_args( pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const def_file_path = findDef(comp, arena, lib_name) catch |err| switch (err) { error.FileNotFound => { diff --git a/src/musl.zig b/src/musl.zig index b9d00c4b124f..7c3957fdd744 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { const gpa = comp.gpa; var arena_allocator = 
std.heap.ArenaAllocator.init(gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); switch (crt_file) { .crti_o => { diff --git a/src/print_air.zig b/src/print_air.zig index 86fc6a63962d..ce53a26aeb78 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -47,7 +47,7 @@ pub fn dump(gpa: Allocator, air: Air, zir: Zir, liveness: Liveness) void { var writer: Writer = .{ .gpa = gpa, - .arena = &arena.allocator, + .arena = arena.getAllocator(), .air = air, .zir = zir, .liveness = liveness, diff --git a/src/print_zir.zig b/src/print_zir.zig index 9532b33ccd63..996898b4ace2 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -19,7 +19,7 @@ pub fn renderAsTextToFile( var writer: Writer = .{ .gpa = gpa, - .arena = &arena.allocator, + .arena = arena.getAllocator(), .file = scope_file, .code = scope_file.zir, .indent = 0, @@ -74,7 +74,7 @@ pub fn renderInstructionContext( var writer: Writer = .{ .gpa = gpa, - .arena = &arena.allocator, + .arena = arena.getAllocator(), .file = scope_file, .code = scope_file.zir, .indent = if (indent < 2) 2 else indent, @@ -106,7 +106,7 @@ pub fn renderSingleInstruction( var writer: Writer = .{ .gpa = gpa, - .arena = &arena.allocator, + .arena = arena.getAllocator(), .file = scope_file, .code = scope_file.zir, .indent = indent, diff --git a/src/stage1.zig b/src/stage1.zig index 942be668892d..810dcc477b93 100644 --- a/src/stage1.zig +++ b/src/stage1.zig @@ -38,7 +38,7 @@ pub fn main(argc: c_int, argv: [*][*:0]u8) callconv(.C) c_int { const gpa = std.heap.c_allocator; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); - const arena = &arena_instance.allocator; + const arena = arena_instance.getAllocator(); const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{s}", .{"OutOfMemory"}); for (args) |*arg, i| { diff --git a/src/test.zig b/src/test.zig index a9c1905b3636..74147069e80f 100644 --- a/src/test.zig +++ b/src/test.zig @@ -692,7 +692,7 @@ pub const TestContext = struct { var arena_allocator = std.heap.ArenaAllocator.init(allocator); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); diff --git a/src/translate_c.zig b/src/translate_c.zig index 109535d0810f..570059255004 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -373,13 +373,14 @@ pub fn translate( // from this function. 
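// Note: the hunk below shows the shape this series uses whenever an arena is
// consulted more than once: the `Allocator` interface value is obtained a
// single time via `getAllocator()` and stored in a local, instead of each
// call site reaching for the old `&arena.allocator` field pointer. A minimal
// sketch of the resulting shape, illustrative only:
//
//     var arena = std.heap.ArenaAllocator.init(gpa);
//     errdefer arena.deinit();
//     const arena_allocator = arena.getAllocator();
//     const root = try arena_allocator.create(Scope.Root);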
var arena = std.heap.ArenaAllocator.init(gpa); errdefer arena.deinit(); + const arena_allocator = arena.getAllocator(); var context = Context{ .gpa = gpa, - .arena = &arena.allocator, + .arena = arena_allocator, .source_manager = ast_unit.getSourceManager(), .alias_list = AliasList.init(gpa), - .global_scope = try arena.allocator.create(Scope.Root), + .global_scope = try arena_allocator.create(Scope.Root), .clang_context = ast_unit.getASTContext(), .pattern_list = try PatternList.init(gpa), }; diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig index 18906cb6c710..b2235ad53ebe 100644 --- a/src/wasi_libc.zig +++ b/src/wasi_libc.zig @@ -67,7 +67,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { const gpa = comp.gpa; var arena_allocator = std.heap.ArenaAllocator.init(gpa); defer arena_allocator.deinit(); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); switch (crt_file) { .crt1_reactor_o => { diff --git a/test/cli.zig b/test/cli.zig index 3f50ebe4039f..20a2143f51c3 100644 --- a/test/cli.zig +++ b/test/cli.zig @@ -16,7 +16,7 @@ pub fn main() !void { // skip my own exe name _ = arg_it.skip(); - a = &arena.allocator; + a = arena.getAllocator(); const zig_exe_rel = try (arg_it.next(a) orelse { std.debug.print("Expected first argument to be path to zig compiler\n", .{}); diff --git a/test/compare_output.zig b/test/compare_output.zig index 68d8f2a80730..46cbdd77f6ee 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -491,7 +491,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\pub fn main() !void { \\ var allocator_buf: [10]u8 = undefined; \\ var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); - \\ const allocator = &std.heap.loggingAllocator(&fixedBufferAllocator.allocator).allocator; + \\ const allocator = std.heap.loggingAllocator(fixedBufferAllocator.getAllocator()).getAllocator(); \\ \\ var a = try allocator.alloc(u8, 10); \\ a = allocator.shrink(a, 5); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 09a165304cc3..3ed47432754b 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -6550,9 +6550,9 @@ pub fn addCases(ctx: *TestContext) !void { ctx.objErrStage1("method call with first arg type wrong container", \\pub const List = struct { \\ len: usize, - \\ allocator: *Allocator, + \\ allocator: Allocator, \\ - \\ pub fn init(allocator: *Allocator) List { + \\ pub fn init(allocator: Allocator) List { \\ return List { \\ .len = 0, \\ .allocator = allocator, @@ -6573,7 +6573,7 @@ pub fn addCases(ctx: *TestContext) !void { \\ x.init(); \\} , &[_][]const u8{ - "tmp.zig:23:5: error: expected type '*Allocator', found '*List'", + "tmp.zig:23:5: error: expected type 'Allocator', found '*List'", }); ctx.objErrStage1("binary not on number literal", @@ -7569,7 +7569,7 @@ pub fn addCases(ctx: *TestContext) !void { \\ \\export fn entry() void { \\ const a = MdNode.Header { - \\ .text = MdText.init(&std.testing.allocator), + \\ .text = MdText.init(std.testing.allocator), \\ .weight = HeaderWeight.H1, \\ }; \\ _ = a; diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig index 35fe6e5c6a08..f0dea39ccbb0 100644 --- a/test/standalone/brace_expansion/main.zig +++ b/test/standalone/brace_expansion/main.zig @@ -16,7 +16,7 @@ const Token = union(enum) { }; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; -var global_allocator = &gpa.allocator; +const global_allocator = gpa.getAllocator(); 
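// The line above is the core allocgate shape: `std.mem.Allocator` is now a
// small value type (a type-erased pointer plus function pointers, built by
// `Allocator.init(self, alloc, resize)` as seen in the lib/std/heap hunks),
// returned by a method on the concrete allocator rather than exposed as a
// field whose address callers take. A minimal before/after sketch,
// illustrative only:
//
//     var gpa = std.heap.GeneralPurposeAllocator(.{}){};
//     // before: var a: *std.mem.Allocator = &gpa.allocator;
//     // after:  const a: std.mem.Allocator = gpa.getAllocator();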
fn tokenize(input: []const u8) !ArrayList(Token) { const State = enum { diff --git a/test/standalone/cat/main.zig b/test/standalone/cat/main.zig index 80ec97877a0d..a8b16a05ca48 100644 --- a/test/standalone/cat/main.zig +++ b/test/standalone/cat/main.zig @@ -8,7 +8,7 @@ const warn = std.log.warn; pub fn main() !void { var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena_instance.deinit(); - const arena = &arena_instance.allocator; + const arena = arena_instance.getAllocator(); const args = try process.argsAlloc(arena); diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig index 0480866867b6..e4ad6927b2c5 100644 --- a/tools/gen_spirv_spec.zig +++ b/tools/gen_spirv_spec.zig @@ -4,7 +4,7 @@ const g = @import("spirv/grammar.zig"); pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.getAllocator(); const args = try std.process.argsAlloc(allocator); if (args.len != 2) { diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig index f845c58b568d..a99d14752f31 100644 --- a/tools/gen_stubs.zig +++ b/tools/gen_stubs.zig @@ -25,7 +25,7 @@ pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const ally = &arena.allocator; + const ally = arena.getAllocator(); var symbols = std.ArrayList(Symbol).init(ally); var sections = std.ArrayList([]const u8).init(ally); diff --git a/tools/merge_anal_dumps.zig b/tools/merge_anal_dumps.zig index 648a76ebfb63..93e067460557 100644 --- a/tools/merge_anal_dumps.zig +++ b/tools/merge_anal_dumps.zig @@ -9,7 +9,7 @@ pub fn main() anyerror!void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.getAllocator(); const args = try std.process.argsAlloc(allocator); diff --git a/tools/process_headers.zig b/tools/process_headers.zig index fa5fdb0042ef..1a90f99343eb 100644 --- a/tools/process_headers.zig +++ b/tools/process_headers.zig @@ -284,7 +284,7 @@ const LibCVendor = enum { pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const allocator = &arena.allocator; + const allocator = arena.getAllocator(); const args = try std.process.argsAlloc(allocator); var search_paths = std.ArrayList([]const u8).init(allocator); var opt_out_dir: ?[]const u8 = null; diff --git a/tools/update-license-headers.zig b/tools/update-license-headers.zig index 4cc60ca4ea8f..83c47f1481fd 100644 --- a/tools/update-license-headers.zig +++ b/tools/update-license-headers.zig @@ -10,7 +10,7 @@ pub fn main() !void { defer root_node.end(); var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const arena = &arena_allocator.allocator; + const arena = arena_allocator.getAllocator(); const args = try std.process.argsAlloc(arena); const path_to_walk = args[1]; diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig index c999db289991..90a96e057294 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -450,8 +450,13 @@ const cpu_targets = struct { pub fn main() anyerror!void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); +<<<<<<< HEAD const allocator = &arena.allocator; +======= + + const allocator = arena.getAllocator(); +>>>>>>> 11157e318 (allocgate: stage 1 and 2 building) const args = try std.process.argsAlloc(allocator); if 
(args.len <= 1) { diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig index 2eccb0ee1b4c..70bc5a1c74ee 100644 --- a/tools/update_cpu_features.zig +++ b/tools/update_cpu_features.zig @@ -769,7 +769,7 @@ const llvm_targets = [_]LlvmTarget{ pub fn main() anyerror!void { var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena_state.deinit(); - const arena = &arena_state.allocator; + const arena = arena_state.getAllocator(); const args = try std.process.argsAlloc(arena); if (args.len <= 1) { @@ -845,7 +845,7 @@ fn processOneTarget(job: Job) anyerror!void { var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena_state.deinit(); - const arena = &arena_state.allocator; + const arena = arena_state.getAllocator(); var progress_node = job.root_progress.start(llvm_target.zig_name, 3); progress_node.activate(); diff --git a/tools/update_glibc.zig b/tools/update_glibc.zig index 6232a2e8f0c7..7cccb47e1cb7 100644 --- a/tools/update_glibc.zig +++ b/tools/update_glibc.zig @@ -133,7 +133,7 @@ const Function = struct { pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const allocator = &arena.allocator; + const allocator = arena.getAllocator(); const args = try std.process.argsAlloc(allocator); const in_glibc_dir = args[1]; // path to the unzipped tarball of glibc, e.g. ~/downloads/glibc-2.25 const zig_src_dir = args[2]; // path to the source checkout of zig, lib dir, e.g. ~/zig-src/lib diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig index 756d311ecc73..0c6c570a317c 100644 --- a/tools/update_spirv_features.zig +++ b/tools/update_spirv_features.zig @@ -48,7 +48,7 @@ const Version = struct { pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = &arena.allocator; + const allocator = arena.getAllocator(); const args = try std.process.argsAlloc(allocator); From 1093b09a989edb8553e79b061bb15c5745f5d193 Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Fri, 29 Oct 2021 02:08:41 +0100 Subject: [PATCH 04/10] allocgate: renamed getAllocator function to allocator --- ci/srht/update-download-page.zig | 2 +- doc/docgen.zig | 2 +- doc/langref.html.in | 8 ++-- lib/std/Thread.zig | 2 +- lib/std/array_list.zig | 6 +-- lib/std/atomic/queue.zig | 2 +- lib/std/atomic/stack.zig | 2 +- lib/std/build.zig | 6 +-- lib/std/build/OptionsStep.zig | 2 +- lib/std/builtin.zig | 2 +- lib/std/child_process.zig | 4 +- lib/std/crypto/benchmark.zig | 2 +- lib/std/debug.zig | 2 +- lib/std/event/loop.zig | 4 +- lib/std/fs/test.zig | 14 +++---- lib/std/hash/benchmark.zig | 2 +- lib/std/heap.zig | 40 ++++++++++---------- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/general_purpose_allocator.zig | 44 +++++++++++----------- lib/std/heap/log_to_writer_allocator.zig | 4 +- lib/std/heap/logging_allocator.zig | 2 +- lib/std/json.zig | 14 +++---- lib/std/json/write_stream.zig | 2 +- lib/std/mem.zig | 4 +- lib/std/net.zig | 4 +- lib/std/os/test.zig | 2 +- lib/std/process.zig | 2 +- lib/std/special/build_runner.zig | 2 +- lib/std/special/test_runner.zig | 2 +- lib/std/testing.zig | 6 +-- lib/std/testing/failing_allocator.zig | 2 +- lib/std/zig/parser_test.zig | 8 ++-- lib/std/zig/perf_test.zig | 2 +- lib/std/zig/string_literal.zig | 2 +- src/AstGen.zig | 4 +- src/Compilation.zig | 24 ++++++------ src/DepTokenizer.zig | 2 +- src/Module.zig | 16 ++++---- src/Sema.zig | 34 ++++++++--------- src/codegen/c.zig | 2 +- 
src/codegen/llvm.zig | 10 ++--- src/crash_report.zig | 2 +- src/glibc.zig | 6 +-- src/libcxx.zig | 4 +- src/libtsan.zig | 2 +- src/libunwind.zig | 2 +- src/link.zig | 2 +- src/link/C.zig | 4 +- src/link/Coff.zig | 2 +- src/link/Elf.zig | 2 +- src/link/MachO.zig | 4 +- src/link/Plan9.zig | 2 +- src/link/Wasm.zig | 2 +- src/link/tapi.zig | 6 +-- src/link/tapi/yaml.zig | 8 ++-- src/main.zig | 8 ++-- src/mingw.zig | 4 +- src/musl.zig | 2 +- src/print_air.zig | 2 +- src/print_zir.zig | 6 +-- src/stage1.zig | 2 +- src/test.zig | 2 +- src/translate_c.zig | 2 +- src/wasi_libc.zig | 2 +- test/cli.zig | 2 +- test/compare_output.zig | 2 +- test/standalone/brace_expansion/main.zig | 2 +- test/standalone/cat/main.zig | 2 +- tools/gen_spirv_spec.zig | 2 +- tools/gen_stubs.zig | 2 +- tools/merge_anal_dumps.zig | 2 +- tools/process_headers.zig | 2 +- tools/update-license-headers.zig | 2 +- tools/update_clang_options.zig | 7 +--- tools/update_cpu_features.zig | 4 +- tools/update_glibc.zig | 2 +- tools/update_spirv_features.zig | 2 +- 77 files changed, 202 insertions(+), 207 deletions(-) diff --git a/ci/srht/update-download-page.zig b/ci/srht/update-download-page.zig index b16a8609a80d..daaee18faf07 100644 --- a/ci/srht/update-download-page.zig +++ b/ci/srht/update-download-page.zig @@ -6,7 +6,7 @@ pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const out_dir = "out"; try std.fs.cwd().makePath(out_dir); diff --git a/doc/docgen.zig b/doc/docgen.zig index 1f6ff7461789..08502f0b7957 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -21,7 +21,7 @@ pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); var args_it = process.args(); diff --git a/doc/langref.html.in b/doc/langref.html.in index 631c9486286a..b2f211468eae 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -10061,7 +10061,7 @@ const expect = std.testing.expect; test "using an allocator" { var buffer: [100]u8 = undefined; - const allocator = std.heap.FixedBufferAllocator.init(&buffer).getAllocator(); + const allocator = std.heap.FixedBufferAllocator.init(&buffer).allocator(); const result = try concat(allocator, "foo", "bar"); try expect(std.mem.eql(u8, "foobar", result)); } @@ -10114,7 +10114,7 @@ pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const ptr = try allocator.create(i32); std.debug.print("ptr={*}\n", .{ptr}); @@ -10820,7 +10820,7 @@ const std = @import("std"); pub fn main() !void { var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; - const gpa = general_purpose_allocator.getAllocator(); + const gpa = general_purpose_allocator.allocator(); const args = try std.process.argsAlloc(gpa); defer std.process.argsFree(gpa, args); @@ -10842,7 +10842,7 @@ const PreopenList = std.fs.wasi.PreopenList; pub fn main() !void { var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; - const gpa = general_purpose_allocator.getAllocator(); + const gpa = general_purpose_allocator.allocator(); var preopens = PreopenList.init(gpa); defer preopens.deinit(); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 61f19c20d083..855c44c03291 100644 --- a/lib/std/Thread.zig +++ 
b/lib/std/Thread.zig @@ -460,7 +460,7 @@ const WindowsThreadImpl = struct { errdefer assert(windows.kernel32.HeapFree(heap_handle, 0, alloc_ptr) != 0); const instance_bytes = @ptrCast([*]u8, alloc_ptr)[0..alloc_bytes]; - const instance = std.heap.FixedBufferAllocator.init(instance_bytes).getAllocator().create(Instance) catch unreachable; + const instance = std.heap.FixedBufferAllocator.init(instance_bytes).allocator().create(Instance) catch unreachable; instance.* = .{ .fn_args = args, .thread = .{ diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index d88dae95ff73..fe98caf25f22 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -1119,7 +1119,7 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" { test "std.ArrayList/ArrayListUnmanaged.replaceRange" { var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const a = arena.getAllocator(); + const a = arena.allocator(); const init = [_]i32{ 1, 2, 3, 4, 5 }; const new = [_]i32{ 0, 0, 0 }; @@ -1263,7 +1263,7 @@ test "std.ArrayList/ArrayListUnmanaged.shrink still sets length on error.OutOfMe // use an arena allocator to make sure realloc returns error.OutOfMemory var arena = std.heap.ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const a = arena.getAllocator(); + const a = arena.allocator(); { var list = ArrayList(i32).init(a); @@ -1361,7 +1361,7 @@ test "ArrayListAligned/ArrayListAlignedUnmanaged accepts unaligned slices" { test "std.ArrayList(u0)" { // An ArrayList on zero-sized types should not need to allocate - const a = testing.FailingAllocator.init(testing.allocator, 0).getAllocator(); + const a = testing.FailingAllocator.init(testing.allocator, 0).allocator(); var list = ArrayList(u0).init(a); defer list.deinit(); diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig index 3b4a14110c13..6c502ef37ee0 100644 --- a/lib/std/atomic/queue.zig +++ b/lib/std/atomic/queue.zig @@ -177,7 +177,7 @@ test "std.atomic.Queue" { defer std.heap.page_allocator.free(plenty_of_memory); var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory); - var a = fixed_buffer_allocator.getThreadSafeAllocator(); + var a = fixed_buffer_allocator.threadSafeAllocator(); var queue = Queue(i32).init(); var context = Context{ diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig index c1b368b57191..a6396bb22bc6 100644 --- a/lib/std/atomic/stack.zig +++ b/lib/std/atomic/stack.zig @@ -89,7 +89,7 @@ test "std.atomic.stack" { defer std.heap.page_allocator.free(plenty_of_memory); var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(plenty_of_memory); - var a = fixed_buffer_allocator.getThreadSafeAllocator(); + var a = fixed_buffer_allocator.threadSafeAllocator(); var stack = Stack(i32).init(); var context = Context{ diff --git a/lib/std/build.zig b/lib/std/build.zig index dba27f86b9c5..378af18b2c9e 100644 --- a/lib/std/build.zig +++ b/lib/std/build.zig @@ -1285,7 +1285,7 @@ test "builder.findProgram compiles" { defer arena.deinit(); const builder = try Builder.create( - arena.getAllocator(), + arena.allocator(), "zig", "zig-cache", "zig-cache", @@ -3207,7 +3207,7 @@ test "Builder.dupePkg()" { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); var builder = try Builder.create( - arena.getAllocator(), + arena.allocator(), "test", "test", "test", @@ -3252,7 +3252,7 @@ test "LibExeObjStep.addPackage" { defer arena.deinit(); var builder = try Builder.create( - arena.getAllocator(), + arena.allocator(), "test", 
"test", "test", diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig index d3ac0d419628..d106b051718a 100644 --- a/lib/std/build/OptionsStep.zig +++ b/lib/std/build/OptionsStep.zig @@ -274,7 +274,7 @@ test "OptionsStep" { var arena = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena.deinit(); var builder = try Builder.create( - arena.getAllocator(), + arena.allocator(), "test", "test", "test", diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index e0acc237d946..9ce8c1c38ed3 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -75,7 +75,7 @@ pub const StackTrace = struct { }; const tty_config = std.debug.detectTTYConfig(); try writer.writeAll("\n"); - std.debug.writeStackTrace(self, writer, arena.getAllocator(), debug_info, tty_config) catch |err| { + std.debug.writeStackTrace(self, writer, arena.allocator(), debug_info, tty_config) catch |err| { try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)}); }; try writer.writeAll("\n"); diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index cc9f1b28018d..13e14c7f3481 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -541,7 +541,7 @@ pub const ChildProcess = struct { var arena_allocator = std.heap.ArenaAllocator.init(self.allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); // The POSIX standard does not allow malloc() between fork() and execve(), // and `self.allocator` may be a libc allocator. @@ -1149,7 +1149,7 @@ test "createNullDelimitedEnvMap" { var arena = std.heap.ArenaAllocator.init(allocator); defer arena.deinit(); - const environ = try createNullDelimitedEnvMap(arena.getAllocator(), &envmap); + const environ = try createNullDelimitedEnvMap(arena.allocator(), &envmap); try testing.expectEqual(@as(usize, 5), environ.len); diff --git a/lib/std/crypto/benchmark.zig b/lib/std/crypto/benchmark.zig index 52e56ddf18be..9fd3c425444f 100644 --- a/lib/std/crypto/benchmark.zig +++ b/lib/std/crypto/benchmark.zig @@ -363,7 +363,7 @@ pub fn main() !void { var buffer: [1024]u8 = undefined; var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]); - const args = try std.process.argsAlloc(fixed.getAllocator()); + const args = try std.process.argsAlloc(fixed.allocator()); var filter: ?[]u8 = ""; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index b6990d675dc1..64db6eeadc39 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1566,7 +1566,7 @@ fn getDebugInfoAllocator() mem.Allocator { if (debug_info_allocator) |a| return a; debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const allocator = debug_info_arena_allocator.getAllocator(); + const allocator = debug_info_arena_allocator.allocator(); debug_info_allocator = allocator; return allocator; } diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index 191ed4de94df..23c89aabc5ce 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -173,12 +173,12 @@ pub const Loop = struct { // We need at least one of these in case the fs thread wants to use onNextTick const extra_thread_count = thread_count - 1; const resume_node_count = std.math.max(extra_thread_count, 1); - self.eventfd_resume_nodes = try self.arena.getAllocator().alloc( + self.eventfd_resume_nodes = try self.arena.allocator().alloc( std.atomic.Stack(ResumeNode.EventFd).Node, resume_node_count, ); - self.extra_threads = try self.arena.getAllocator().alloc(Thread, extra_thread_count); + 
self.extra_threads = try self.arena.allocator().alloc(Thread, extra_thread_count); try self.initOsData(extra_thread_count); errdefer self.deinitOsData(); diff --git a/lib/std/fs/test.zig b/lib/std/fs/test.zig index 437ff5620d7c..1ab6608327ad 100644 --- a/lib/std/fs/test.zig +++ b/lib/std/fs/test.zig @@ -52,7 +52,7 @@ test "accessAbsolute" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const base_path = blk: { const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); @@ -71,7 +71,7 @@ test "openDirAbsolute" { try tmp.dir.makeDir("subdir"); var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const base_path = blk: { const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..], "subdir" }); @@ -111,7 +111,7 @@ test "readLinkAbsolute" { // Get base abs path var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const base_path = blk: { const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); @@ -162,7 +162,7 @@ test "Dir.Iterator" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); var entries = std.ArrayList(Dir.Entry).init(allocator); @@ -207,7 +207,7 @@ test "Dir.realpath smoke test" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const base_path = blk: { const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] }); @@ -482,7 +482,7 @@ test "renameAbsolute" { // Get base abs path var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const base_path = blk: { const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp_dir.sub_path[0..] }); @@ -993,7 +993,7 @@ test ". and .. in absolute functions" { var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const base_path = blk: { const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] 
}); diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index 5b278ca0b11e..f6f1da18949e 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -165,7 +165,7 @@ pub fn main() !void { var buffer: [1024]u8 = undefined; var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]); - const args = try std.process.argsAlloc(fixed.getAllocator()); + const args = try std.process.argsAlloc(fixed.allocator()); var filter: ?[]u8 = ""; var count: usize = mode(128 * MiB); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 213953553f28..c9a5062570a2 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -573,7 +573,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { }; } - pub fn getAllocator(self: *HeapAllocator) Allocator { + pub fn allocator(self: *HeapAllocator) Allocator { return Allocator.init(self, alloc, resize); } @@ -680,14 +680,14 @@ pub const FixedBufferAllocator = struct { }; } - /// *WARNING* using this at the same time as the interface returned by `getThreadSafeAllocator` is not thread safe - pub fn getAllocator(self: *FixedBufferAllocator) Allocator { + /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe + pub fn allocator(self: *FixedBufferAllocator) Allocator { return Allocator.init(self, alloc, resize); } /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator` - /// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe + /// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe - pub fn getThreadSafeAllocator(self: *FixedBufferAllocator) Allocator { + pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator { return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize); } @@ -775,7 +775,7 @@ pub const FixedBufferAllocator = struct { } }; -pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `getThreadSafeAllocator` on FixedBufferAllocator"); +pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `threadSafeAllocator` on FixedBufferAllocator"); pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) { return StackFallbackAllocator(size){ @@ -909,7 +909,7 @@ test "HeapAllocator" { if (builtin.os.tag == .windows) { var heap_allocator = HeapAllocator.init(); defer heap_allocator.deinit(); - const allocator = heap_allocator.getAllocator(); + const allocator = heap_allocator.allocator(); try testAllocator(allocator); try testAllocatorAligned(allocator); @@ -921,7 +921,7 @@ test "ArenaAllocator" { var arena_allocator = ArenaAllocator.init(page_allocator); defer arena_allocator.deinit(); - const allocator = arena_allocator.getAllocator(); + const allocator = arena_allocator.allocator(); try testAllocator(allocator); try testAllocatorAligned(allocator); @@ -932,7 +932,7 @@ var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined; test "FixedBufferAllocator" { var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..])); - const allocator = fixed_buffer_allocator.getAllocator(); + const allocator = fixed_buffer_allocator.allocator(); try testAllocator(allocator); try testAllocatorAligned(allocator); @@ -943,7 +943,7 @@ test "FixedBufferAllocator.reset" { var buf: [8]u8 align(@alignOf(u64)) = undefined; var 
fba = FixedBufferAllocator.init(buf[0..]); - const allocator = fba.getAllocator(); + const allocator = fba.allocator(); const X = 0xeeeeeeeeeeeeeeee; const Y = 0xffffffffffffffff; @@ -976,7 +976,7 @@ test "FixedBufferAllocator Reuse memory on realloc" { // check if we re-use the memory { var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); - const allocator = fixed_buffer_allocator.getAllocator(); + const allocator = fixed_buffer_allocator.allocator(); var slice0 = try allocator.alloc(u8, 5); try testing.expect(slice0.len == 5); @@ -988,7 +988,7 @@ test "FixedBufferAllocator Reuse memory on realloc" { // check that we don't re-use the memory if it's not the most recent block { var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]); - const allocator = fixed_buffer_allocator.getAllocator(); + const allocator = fixed_buffer_allocator.allocator(); var slice0 = try allocator.alloc(u8, 2); slice0[0] = 1; @@ -1005,16 +1005,16 @@ test "FixedBufferAllocator Reuse memory on realloc" { test "Thread safe FixedBufferAllocator" { var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]); - try testAllocator(fixed_buffer_allocator.getThreadSafeAllocator()); - try testAllocatorAligned(fixed_buffer_allocator.getThreadSafeAllocator()); - try testAllocatorLargeAlignment(fixed_buffer_allocator.getThreadSafeAllocator()); - try testAllocatorAlignedShrink(fixed_buffer_allocator.getThreadSafeAllocator()); + try testAllocator(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator()); + try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator()); } /// This one should not try alignments that exceed what C malloc can handle. pub fn testAllocator(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.getAllocator(); + const allocator = validationAllocator.allocator(); var slice = try allocator.alloc(*i32, 100); try testing.expect(slice.len == 100); @@ -1060,7 +1060,7 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void { pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.getAllocator(); + const allocator = validationAllocator.allocator(); // Test a few alignment values, smaller and bigger than the type's one inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| { @@ -1090,7 +1090,7 @@ pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void { pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.getAllocator(); + const allocator = validationAllocator.allocator(); //Maybe a platform's page_size is actually the same as or // very near usize? 
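After this rename, `FixedBufferAllocator` deliberately exposes two interface views over the same buffer state: `allocator()` for single-threaded use and `threadSafeAllocator()` for lock-free multi-threaded use, and the doc comments above warn that the two must not be mixed. A minimal usage sketch, illustrative only:

    var buf: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);
    const a = fba.allocator(); // single-threaded view
    // const a = fba.threadSafeAllocator(); // alternative: lock-free thread-safe view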
@@ -1122,10 +1122,10 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void { pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { var validationAllocator = mem.validationWrap(base_allocator); - const allocator = validationAllocator.getAllocator(); + const allocator = validationAllocator.allocator(); var debug_buffer: [1000]u8 = undefined; - const debug_allocator = FixedBufferAllocator.init(&debug_buffer).getAllocator(); + const debug_allocator = FixedBufferAllocator.init(&debug_buffer).allocator(); const alloc_size = mem.page_size * 2 + 50; var slice = try allocator.alignedAlloc(u8, 16, alloc_size); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index 65b08399456f..c5d7d5ec9db6 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -23,7 +23,7 @@ pub const ArenaAllocator = struct { } }; - pub fn getAllocator(self: *ArenaAllocator) Allocator { + pub fn allocator(self: *ArenaAllocator) Allocator { return Allocator.init(self, alloc, resize); } diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index d4f1dde29979..822db6fb1b96 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -280,7 +280,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } }; - pub fn getAllocator(self: *Self) Allocator { + pub fn allocator(self: *Self) Allocator { return Allocator.init(self, alloc, resize); } @@ -830,7 +830,7 @@ const test_config = Config{}; test "small allocations - free in same order" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var list = std.ArrayList(*u64).init(std.testing.allocator); defer list.deinit(); @@ -849,7 +849,7 @@ test "small allocations - free in same order" { test "small allocations - free in reverse order" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var list = std.ArrayList(*u64).init(std.testing.allocator); defer list.deinit(); @@ -868,7 +868,7 @@ test "small allocations - free in reverse order" { test "large allocations" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); const ptr1 = try allocator.alloc(u64, 42768); const ptr2 = try allocator.alloc(u64, 52768); @@ -881,7 +881,7 @@ test "large allocations" { test "realloc" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1); defer allocator.free(slice); @@ -903,7 +903,7 @@ test "realloc" { test "shrink" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, 20); defer allocator.free(slice); @@ -926,7 +926,7 @@ test "shrink" { test "large object - grow" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator 
= gpa.allocator(); var slice1 = try allocator.alloc(u8, page_size * 2 - 20); defer allocator.free(slice1); @@ -944,7 +944,7 @@ test "large object - grow" { test "realloc small object to large object" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, 70); defer allocator.free(slice); @@ -961,7 +961,7 @@ test "realloc small object to large object" { test "shrink large object to large object" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, page_size * 2 + 50); defer allocator.free(slice); @@ -984,10 +984,10 @@ test "shrink large object to large object" { test "shrink large object to large object with larger alignment" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var debug_buffer: [1000]u8 = undefined; - const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).getAllocator(); + const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).allocator(); const alloc_size = page_size * 2 + 50; var slice = try allocator.alignedAlloc(u8, 16, alloc_size); @@ -1019,7 +1019,7 @@ test "shrink large object to large object with larger alignment" { test "realloc large object to small object" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, page_size * 2 + 50); defer allocator.free(slice); @@ -1037,7 +1037,7 @@ test "overrideable mutexes" { .mutex = std.Thread.Mutex{}, }; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); const ptr = try allocator.create(i32); defer allocator.destroy(ptr); @@ -1046,7 +1046,7 @@ test "overrideable mutexes" { test "non-page-allocator backing allocator" { var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator }; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); const ptr = try allocator.create(i32); defer allocator.destroy(ptr); @@ -1055,10 +1055,10 @@ test "non-page-allocator backing allocator" { test "realloc large object to larger alignment" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var debug_buffer: [1000]u8 = undefined; - const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).getAllocator(); + const debug_allocator = std.heap.FixedBufferAllocator.init(&debug_buffer).allocator(); var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50); defer allocator.free(slice); @@ -1094,9 +1094,9 @@ test "realloc large object to larger alignment" { test "large object shrinks to small but allocation fails during shrink" { var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3); - var gpa = GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.getAllocator() }; + var gpa = 
GeneralPurposeAllocator(.{}){ .backing_allocator = failing_allocator.allocator() }; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); var slice = try allocator.alloc(u8, page_size * 2 + 50); defer allocator.free(slice); @@ -1113,7 +1113,7 @@ test "large object shrinks to small but allocation fails during shrink" { test "objects of size 1024 and 2048" { var gpa = GeneralPurposeAllocator(test_config){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); const slice = try allocator.alloc(u8, 1025); const slice2 = try allocator.alloc(u8, 3000); @@ -1125,7 +1125,7 @@ test "objects of size 1024 and 2048" { test "setting a memory cap" { var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); gpa.setRequestedMemoryLimit(1010); @@ -1154,9 +1154,9 @@ test "double frees" { defer std.testing.expect(!backing_gpa.deinit()) catch @panic("leak"); const GPA = GeneralPurposeAllocator(.{ .safety = true, .never_unmap = true, .retain_metadata = true }); - var gpa = GPA{ .backing_allocator = backing_gpa.getAllocator() }; + var gpa = GPA{ .backing_allocator = backing_gpa.allocator() }; defer std.testing.expect(!gpa.deinit()) catch @panic("leak"); - const allocator = gpa.getAllocator(); + const allocator = gpa.allocator(); // detect a small allocation double free, even though bucket is emptied const index: usize = 6; diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig index 1f3146f79fb3..cab17243126b 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/heap/log_to_writer_allocator.zig @@ -17,7 +17,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { }; } - pub fn getAllocator(self: *Self) Allocator { + pub fn allocator(self: *Self) Allocator { return Allocator.init(self, alloc, resize); } @@ -82,7 +82,7 @@ test "LogToWriterAllocator" { var allocator_buf: [10]u8 = undefined; var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); - const allocator = logToWriterAllocator(fixedBufferAllocator.getAllocator(), fbs.writer()).getAllocator(); + const allocator = logToWriterAllocator(fixedBufferAllocator.allocator(), fbs.writer()).allocator(); var a = try allocator.alloc(u8, 10); a = allocator.shrink(a, 5); diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 34dc554dee5f..da9e731fd5e8 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -32,7 +32,7 @@ pub fn ScopedLoggingAllocator( }; } - pub fn getAllocator(self: *Self) Allocator { + pub fn allocator(self: *Self) Allocator { return Allocator.init(self, alloc, resize); } diff --git a/lib/std/json.zig b/lib/std/json.zig index 978213a5961c..658fec6b7955 100644 --- a/lib/std/json.zig +++ b/lib/std/json.zig @@ -2033,7 +2033,7 @@ test "parse into tagged union" { { // failing allocations should be bubbled up instantly without trying next member var fail_alloc = testing.FailingAllocator.init(testing.allocator, 0); - const options = ParseOptions{ .allocator = fail_alloc.getAllocator() }; + const options = ParseOptions{ .allocator = fail_alloc.allocator() }; const T = union(enum) { // both fields here match the input string: []const u8, @@ -2081,7 +2081,7 @@ test 
"parse union bubbles up AllocatorRequired" { test "parseFree descends into tagged union" { var fail_alloc = testing.FailingAllocator.init(testing.allocator, 1); - const options = ParseOptions{ .allocator = fail_alloc.getAllocator() }; + const options = ParseOptions{ .allocator = fail_alloc.allocator() }; const T = union(enum) { int: i32, float: f64, @@ -2364,7 +2364,7 @@ pub const Parser = struct { var arena = ArenaAllocator.init(p.allocator); errdefer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); while (try s.next()) |token| { try p.transition(allocator, input, s.i - 1, token); @@ -2746,13 +2746,13 @@ fn testParse(arena_allocator: std.mem.Allocator, json_str: []const u8) !Value { test "parsing empty string gives appropriate error" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.getAllocator(), "")); + try testing.expectError(error.UnexpectedEndOfJson, testParse(arena_allocator.allocator(), "")); } test "integer after float has proper type" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - const json = try testParse(arena_allocator.getAllocator(), + const json = try testParse(arena_allocator.allocator(), \\{ \\ "float": 3.14, \\ "ints": [1, 2, 3] @@ -2787,7 +2787,7 @@ test "escaped characters" { \\} ; - const obj = (try testParse(arena_allocator.getAllocator(), input)).Object; + const obj = (try testParse(arena_allocator.allocator(), input)).Object; try testing.expectEqualSlices(u8, obj.get("backslash").?.String, "\\"); try testing.expectEqualSlices(u8, obj.get("forwardslash").?.String, "/"); @@ -2813,7 +2813,7 @@ test "string copy option" { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); defer arena_allocator.deinit(); - const allocator = arena_allocator.getAllocator(); + const allocator = arena_allocator.allocator(); const tree_nocopy = try Parser.init(allocator, false).parse(input); const obj_nocopy = tree_nocopy.root.Object; diff --git a/lib/std/json/write_stream.zig b/lib/std/json/write_stream.zig index 2ef6fa3a86c2..3393f8a6eebf 100644 --- a/lib/std/json/write_stream.zig +++ b/lib/std/json/write_stream.zig @@ -243,7 +243,7 @@ test "json write stream" { try w.beginObject(); try w.objectField("object"); - try w.emitJson(try getJsonObject(arena_allocator.getAllocator())); + try w.emitJson(try getJsonObject(arena_allocator.allocator())); try w.objectField("string"); try w.emitString("This is a string"); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 4b43069d1e79..37b11412722c 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -46,13 +46,13 @@ pub fn ValidationAllocator(comptime T: type) type { }; } - pub fn getAllocator(self: *Self) Allocator { + pub fn allocator(self: *Self) Allocator { return Allocator.init(self, alloc, resize); } fn getUnderlyingAllocatorPtr(self: *Self) Allocator { if (T == Allocator) return self.underlying_allocator; - return self.underlying_allocator.getAllocator(); + return self.underlying_allocator.allocator(); } pub fn alloc( diff --git a/lib/std/net.zig b/lib/std/net.zig index 4f5ce84034b0..6199d739d7a7 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -704,7 +704,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*A var arena = std.heap.ArenaAllocator.init(allocator); errdefer arena.deinit(); - const result = try 
arena.getAllocator().create(AddressList); + const result = try arena.allocator().create(AddressList); result.* = AddressList{ .arena = arena, .addrs = undefined, @@ -712,7 +712,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*A }; break :blk result; }; - const arena = result.arena.getAllocator(); + const arena = result.arena.allocator(); errdefer result.arena.deinit(); if (builtin.target.os.tag == .windows or builtin.link_libc) { diff --git a/lib/std/os/test.zig b/lib/std/os/test.zig index fb5105706c4f..3e6603677c31 100644 --- a/lib/std/os/test.zig +++ b/lib/std/os/test.zig @@ -58,7 +58,7 @@ test "open smoke test" { // Get base abs path var arena = ArenaAllocator.init(testing.allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const base_path = blk: { const relative_path = try fs.path.join(allocator, &[_][]const u8{ "zig-cache", "tmp", tmp.sub_path[0..] }); diff --git a/lib/std/process.zig b/lib/std/process.zig index 6b45a7e7aa34..243978591b94 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -854,7 +854,7 @@ pub fn execve( var arena_allocator = std.heap.ArenaAllocator.init(allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null); for (argv) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; diff --git a/lib/std/special/build_runner.zig b/lib/std/special/build_runner.zig index ab844fef57dc..d798d2ab6fe0 100644 --- a/lib/std/special/build_runner.zig +++ b/lib/std/special/build_runner.zig @@ -16,7 +16,7 @@ pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); var args = try process.argsAlloc(allocator); defer process.argsFree(allocator, args); diff --git a/lib/std/special/test_runner.zig b/lib/std/special/test_runner.zig index f90e8aa58ec0..9a52ebdbb1e9 100644 --- a/lib/std/special/test_runner.zig +++ b/lib/std/special/test_runner.zig @@ -10,7 +10,7 @@ var args_buffer: [std.fs.MAX_PATH_BYTES + std.mem.page_size]u8 = undefined; var args_allocator = std.heap.FixedBufferAllocator.init(&args_buffer); fn processArgs() void { - const args = std.process.argsAlloc(args_allocator.getAllocator()) catch { + const args = std.process.argsAlloc(args_allocator.allocator()) catch { @panic("Too many bytes passed over the CLI to the test runner"); }; if (args.len != 2) { diff --git a/lib/std/testing.zig b/lib/std/testing.zig index b588abbd8caf..e5c2afab401e 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -7,11 +7,11 @@ const print = std.debug.print; pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator; /// This should only be used in temporary test programs. 
-pub const allocator = allocator_instance.getAllocator(); +pub const allocator = allocator_instance.allocator(); pub var allocator_instance = std.heap.GeneralPurposeAllocator(.{}){}; -pub const failing_allocator = failing_allocator_instance.getAllocator(); -pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.getAllocator(), 0); +pub const failing_allocator = failing_allocator_instance.allocator(); +pub var failing_allocator_instance = FailingAllocator.init(base_allocator_instance.allocator(), 0); pub var base_allocator_instance = std.heap.FixedBufferAllocator.init(""); diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 137af925ad69..15da5091fbe1 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -40,7 +40,7 @@ pub const FailingAllocator = struct { }; } - pub fn getAllocator(self: *FailingAllocator) mem.Allocator { + pub fn allocator(self: *FailingAllocator) mem.Allocator { return mem.Allocator.init(self, alloc, resize); } diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index 0fb435791762..ef716ffb32fb 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -5351,8 +5351,8 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void { const needed_alloc_count = x: { // Try it once with unlimited memory, make sure it works var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); - var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), maxInt(usize)); - const allocator = failing_allocator.getAllocator(); + var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.allocator(), maxInt(usize)); + const allocator = failing_allocator.allocator(); var anything_changed: bool = undefined; const result_source = try testParse(source, allocator, &anything_changed); try std.testing.expectEqualStrings(expected_source, result_source); @@ -5369,9 +5369,9 @@ fn testTransform(source: [:0]const u8, expected_source: []const u8) !void { var fail_index: usize = 0; while (fail_index < needed_alloc_count) : (fail_index += 1) { var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); - var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.getAllocator(), fail_index); + var failing_allocator = std.testing.FailingAllocator.init(fixed_allocator.allocator(), fail_index); var anything_changed: bool = undefined; - if (testParse(source, failing_allocator.getAllocator(), &anything_changed)) |_| { + if (testParse(source, failing_allocator.allocator(), &anything_changed)) |_| { return error.NondeterministicMemoryUsage; } else |err| switch (err) { error.OutOfMemory => { diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig index d2286914b048..d3fc90eaeaae 100644 --- a/lib/std/zig/perf_test.zig +++ b/lib/std/zig/perf_test.zig @@ -33,7 +33,7 @@ pub fn main() !void { fn testOnce() usize { var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); - var allocator = fixed_buf_alloc.getAllocator(); + var allocator = fixed_buf_alloc.allocator(); _ = std.zig.parse(allocator, source) catch @panic("parse failure"); return fixed_buf_alloc.end_index; } diff --git a/lib/std/zig/string_literal.zig b/lib/std/zig/string_literal.zig index 5e44e5f8f3a0..1eaab26e3a29 100644 --- a/lib/std/zig/string_literal.zig +++ b/lib/std/zig/string_literal.zig @@ -147,7 +147,7 @@ test "parse" { var fixed_buf_mem: [32]u8 = 
undefined; var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buf_mem[0..]); - var alloc = fixed_buf_alloc.getAllocator(); + var alloc = fixed_buf_alloc.allocator(); try expect(eql(u8, "foo", try parseAlloc(alloc, "\"foo\""))); try expect(eql(u8, "foo", try parseAlloc(alloc, "\"f\x6f\x6f\""))); diff --git a/src/AstGen.zig b/src/AstGen.zig index 2e58c8c5d9a4..f51a73e12aad 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -98,7 +98,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { var astgen: AstGen = .{ .gpa = gpa, - .arena = arena.getAllocator(), + .arena = arena.allocator(), .tree = &tree, }; defer astgen.deinit(gpa); @@ -1939,7 +1939,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod var block_arena = std.heap.ArenaAllocator.init(gz.astgen.gpa); defer block_arena.deinit(); - const block_arena_allocator = block_arena.getAllocator(); + const block_arena_allocator = block_arena.allocator(); var noreturn_src_node: Ast.Node.Index = 0; var scope = parent_scope; diff --git a/src/Compilation.zig b/src/Compilation.zig index 7c855862fd56..6589c980bf97 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -412,7 +412,7 @@ pub const AllErrors = struct { errors: *std.ArrayList(Message), module_err_msg: Module.ErrorMsg, ) !void { - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const notes = try allocator.alloc(Message, module_err_msg.notes.len); for (notes) |*note, i| { const module_note = module_err_msg.notes[i]; @@ -549,7 +549,7 @@ pub const AllErrors = struct { msg: []const u8, optional_children: ?AllErrors, ) !void { - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const duped_msg = try allocator.dupe(u8, msg); if (optional_children) |*children| { try errors.append(.{ .plain = .{ @@ -788,7 +788,7 @@ fn addPackageTableToCacheHash( seen_table: *std.AutoHashMap(*Package, void), hash_type: union(enum) { path_bytes, files: *Cache.Manifest }, ) (error{OutOfMemory} || std.os.GetCwdError)!void { - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const packages = try allocator.alloc(Package.Table.KV, pkg_table.count()); { @@ -852,7 +852,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { // initialization and then is freed in deinit(). var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); // We put the `Compilation` itself in the arena. Freeing the arena will free the module. // It's initialized later after we prepare the initialization options. 
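Every hunk in this file keeps the same temporary-arena shape; only the way the interface value is obtained changes. One point worth keeping in mind when reading these: the `Allocator` returned by `allocator()` stores a pointer back to the `ArenaAllocator` (it is built with `Allocator.init(self, alloc, resize)`), so the arena variable must stay alive, at a stable address, for as long as the derived value is in use. A minimal sketch of the pattern, illustrative only:

    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
    defer arena_allocator.deinit();
    const arena = arena_allocator.allocator(); // borrows &arena_allocator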
@@ -1210,7 +1210,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { { var local_arena = std.heap.ArenaAllocator.init(gpa); defer local_arena.deinit(); - var seen_table = std.AutoHashMap(*Package, void).init(local_arena.getAllocator()); + var seen_table = std.AutoHashMap(*Package, void).init(local_arena.allocator()); try addPackageTableToCacheHash(&hash, &local_arena, main_pkg.table, &seen_table, .path_bytes); } hash.add(valgrind); @@ -2013,7 +2013,7 @@ pub fn totalErrorCount(self: *Compilation) usize { pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors { var arena = std.heap.ArenaAllocator.init(self.gpa); errdefer arena.deinit(); - const arena_allocator = arena.getAllocator(); + const arena_allocator = arena.allocator(); var errors = std.ArrayList(AllErrors.Message).init(self.gpa); defer errors.deinit(); @@ -2295,7 +2295,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress var tmp_arena = std.heap.ArenaAllocator.init(gpa); defer tmp_arena.deinit(); - const sema_arena = tmp_arena.getAllocator(); + const sema_arena = tmp_arena.allocator(); const sema_frame = tracy.namedFrame("sema"); var sema_frame_ended = false; @@ -2390,7 +2390,7 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress .decl = decl, .fwd_decl = fwd_decl.toManaged(gpa), .typedefs = c_codegen.TypedefMap.init(gpa), - .typedefs_arena = typedefs_arena.getAllocator(), + .typedefs_arena = typedefs_arena.allocator(), }; defer dg.fwd_decl.deinit(); defer dg.typedefs.deinit(); @@ -2844,7 +2844,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult { const digest = if (!actual_hit) digest: { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const tmp_digest = man.hash.peek(); const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest }); @@ -3099,7 +3099,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const c_source_basename = std.fs.path.basename(c_object.src.src_path); @@ -4420,7 +4420,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); // Here we use the legacy stage1 C++ compiler to compile Zig code. 
const mod = comp.bin_file.options.module.?; @@ -4457,7 +4457,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node _ = try man.addFile(main_zig_file, null); { - var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.getAllocator()); + var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.allocator()); try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = &man }); } man.hash.add(comp.bin_file.options.valgrind); diff --git a/src/DepTokenizer.zig b/src/DepTokenizer.zig index e99bfc746471..09712baa990d 100644 --- a/src/DepTokenizer.zig +++ b/src/DepTokenizer.zig @@ -878,7 +878,7 @@ test "error prereq - continuation expecting end-of-line" { // - tokenize input, emit textual representation, and compare to expect fn depTokenizer(input: []const u8, expect: []const u8) !void { var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); defer arena_allocator.deinit(); var it: Tokenizer = .{ .bytes = input }; diff --git a/src/Module.zig b/src/Module.zig index d016418d8d8e..0e5e3690bd6a 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -517,7 +517,7 @@ pub const Decl = struct { pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void { assert(decl.value_arena == null); - const arena_state = try arena.getAllocator().create(std.heap.ArenaAllocator.State); + const arena_state = try arena.allocator().create(std.heap.ArenaAllocator.State); arena_state.* = arena.state; decl.value_arena = arena_state; } @@ -3159,7 +3159,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { const gpa = mod.gpa; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const struct_obj = try new_decl_arena_allocator.create(Module.Struct); const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); @@ -3203,7 +3203,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { var sema_arena = std.heap.ArenaAllocator.init(gpa); defer sema_arena.deinit(); - const sema_arena_allocator = sema_arena.getAllocator(); + const sema_arena_allocator = sema_arena.allocator(); var sema: Sema = .{ .mod = mod, @@ -3267,11 +3267,11 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { // We need the memory for the Type to go into the arena for the Decl var decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer decl_arena.deinit(); - const decl_arena_allocator = decl_arena.getAllocator(); + const decl_arena_allocator = decl_arena.allocator(); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - const analysis_arena_allocator = analysis_arena.getAllocator(); + const analysis_arena_allocator = analysis_arena.allocator(); var sema: Sema = .{ .mod = mod, @@ -4132,7 +4132,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: Allocator) Sem // Use the Decl's arena for captured values. var decl_arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = decl_arena.state; - const decl_arena_allocator = decl_arena.getAllocator(); + const decl_arena_allocator = decl_arena.allocator(); var sema: Sema = .{ .mod = mod, @@ -4756,7 +4756,7 @@ pub fn populateTestFunctions(mod: *Module) !void { // decl reference it as a slice. 
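The Decl arenas above rely on std.heap.ArenaAllocator.State: an arena is parked by saving its state field (finalizeNewArena even stores it inside the arena's own last allocation) and revived later with promote. A minimal sketch of that round trip, assuming the std of this series:

const std = @import("std");

// Sketch of the park/revive idiom behind Decl.value_arena.
test "ArenaAllocator.State round trip" {
    const gpa = std.testing.allocator;

    var arena = std.heap.ArenaAllocator.init(gpa);
    _ = try arena.allocator().alloc(u8, 8);

    // Park: keep only the small State value; the ArenaAllocator struct
    // itself can go out of scope without losing the allocated buffers.
    var state = arena.state;

    // Revive with the same child allocator and keep allocating.
    var revived = state.promote(gpa);
    defer revived.deinit(); // frees everything from both sessions
    _ = try revived.allocator().alloc(u8, 8);
}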
var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.getAllocator(); + const arena = new_decl_arena.allocator(); const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); const array_decl = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ @@ -4807,7 +4807,7 @@ pub fn populateTestFunctions(mod: *Module) !void { { var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.getAllocator(); + const arena = new_decl_arena.allocator(); // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); diff --git a/src/Sema.zig b/src/Sema.zig index 9e0aa2f75edb..7bf36d9ae034 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -418,7 +418,7 @@ pub const Block = struct { finished: bool, pub fn arena(wad: *WipAnonDecl) Allocator { - return wad.new_decl_arena.getAllocator(); + return wad.new_decl_arena.allocator(); } pub fn deinit(wad: *WipAnonDecl) void { @@ -1594,7 +1594,7 @@ fn zirStructDecl( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const struct_obj = try new_decl_arena_allocator.create(Module.Struct); const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); @@ -1699,7 +1699,7 @@ fn zirEnumDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull); const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); @@ -1889,7 +1889,7 @@ fn zirUnionDecl( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const union_obj = try new_decl_arena_allocator.create(Module.Union); const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union"; @@ -1958,7 +1958,7 @@ fn zirOpaqueDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); @@ -2012,7 +2012,7 @@ fn zirErrorSetDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const error_set = try new_decl_arena_allocator.create(Module.ErrorSet); const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); @@ -3940,7 +3940,7 @@ fn analyzeCall( { var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); for (memoized_call_key.args) |*arg| { arg.* = try arg.*.copy(arena); @@ -4074,7 +4074,7 @@ fn 
analyzeCall( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); // Re-run the block that creates the function, with the comptime parameters // pre-populated inside `inst_map`. This causes `param_comptime` and @@ -6053,8 +6053,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer arena.deinit(); const target = sema.mod.getTarget(); - const min_int = try operand_ty.minInt(arena.getAllocator(), target); - const max_int = try operand_ty.maxInt(arena.getAllocator(), target); + const min_int = try operand_ty.minInt(arena.allocator(), target); + const max_int = try operand_ty.maxInt(arena.allocator(), target); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -12801,7 +12801,7 @@ const ComptimePtrMutationKit = struct { fn beginArena(self: *ComptimePtrMutationKit, gpa: Allocator) Allocator { self.decl_arena = self.decl_ref_mut.decl.value_arena.?.promote(gpa); - return self.decl_arena.getAllocator(); + return self.decl_arena.allocator(); } fn finishArena(self: *ComptimePtrMutationKit) void { @@ -14293,7 +14293,7 @@ fn semaStructFields( var decl_arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = decl_arena.state; - const decl_arena_allocator = decl_arena.getAllocator(); + const decl_arena_allocator = decl_arena.allocator(); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -14301,7 +14301,7 @@ fn semaStructFields( var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = analysis_arena.getAllocator(), + .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, @@ -14461,7 +14461,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var decl_arena = union_obj.owner_decl.value_arena.?.promote(gpa); defer union_obj.owner_decl.value_arena.?.* = decl_arena.state; - const decl_arena_allocator = decl_arena.getAllocator(); + const decl_arena_allocator = decl_arena.allocator(); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -14469,7 +14469,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = analysis_arena.getAllocator(), + .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, @@ -14623,7 +14623,7 @@ fn generateUnionTagTypeNumbered( var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered); const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered); @@ -14660,7 +14660,7 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: u32) !Type var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.getAllocator(); + const new_decl_arena_allocator = new_decl_arena.allocator(); const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple); const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple); diff --git a/src/codegen/c.zig 
b/src/codegen/c.zig index 142bf1a146fe..f54ae7f76de1 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -390,7 +390,7 @@ pub const DeclGen = struct { // Fall back to generic implementation. var arena = std.heap.ArenaAllocator.init(dg.module.gpa); defer arena.deinit(); - const arena_allocator = arena.getAllocator(); + const arena_allocator = arena.allocator(); try writer.writeAll("{"); var index: usize = 0; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 31d3461846d3..4600c2e07e69 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -331,7 +331,7 @@ pub const Object = struct { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const mod = comp.bin_file.options.module.?; const cache_dir = mod.zig_cache_artifact_directory; @@ -779,7 +779,7 @@ pub const DeclGen = struct { // The Type memory is ephemeral; since we want to store a longer-lived // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator()); + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); const opaque_obj = t.castTag(.@"opaque").?.data; const name = try opaque_obj.getFullyQualifiedName(gpa); @@ -837,7 +837,7 @@ pub const DeclGen = struct { // The Type memory is ephemeral; since we want to store a longer-lived // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator()); + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); const struct_obj = t.castTag(.@"struct").?.data; @@ -871,7 +871,7 @@ pub const DeclGen = struct { // The Type memory is ephemeral; since we want to store a longer-lived // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.getAllocator()); + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); const union_obj = t.cast(Type.Payload.Union).?.data; const target = dg.module.getTarget(); @@ -2485,7 +2485,7 @@ pub const FuncGen = struct { var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const llvm_params_len = args.len; const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len); diff --git a/src/crash_report.zig b/src/crash_report.zig index 92c37d2ac815..5d96ebc669ef 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -85,7 +85,7 @@ fn dumpStatusReport() !void { const anal = zir_state orelse return; // Note: We have the panic mutex here, so we can safely use the global crash heap. 
var fba = std.heap.FixedBufferAllocator.init(&crash_heap); - const allocator = fba.getAllocator(); + const allocator = fba.allocator(); const stderr = io.getStdErr().writer(); const block: *Sema.Block = anal.block; diff --git a/src/glibc.zig b/src/glibc.zig index c3f2da599eff..e67c3360e9e3 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -65,7 +65,7 @@ pub fn loadMetaData(gpa: Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!* var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); var all_versions = std.ArrayListUnmanaged(std.builtin.Version){}; var all_functions = std.ArrayListUnmanaged(Fn){}; @@ -256,7 +256,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { const gpa = comp.gpa; var arena_allocator = std.heap.ArenaAllocator.init(gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); switch (crt_file) { .crti_o => { @@ -711,7 +711,7 @@ pub fn buildSharedObjects(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const target = comp.getTarget(); const target_version = target.os.version_range.linux.glibc; diff --git a/src/libcxx.zig b/src/libcxx.zig index 908df3ca2592..fe96207c483a 100644 --- a/src/libcxx.zig +++ b/src/libcxx.zig @@ -89,7 +89,7 @@ pub fn buildLibCXX(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const root_name = "c++"; const output_mode = .Lib; @@ -236,7 +236,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const root_name = "c++abi"; const output_mode = .Lib; diff --git a/src/libtsan.zig b/src/libtsan.zig index 47089cc7790f..0f05957387ff 100644 --- a/src/libtsan.zig +++ b/src/libtsan.zig @@ -15,7 +15,7 @@ pub fn buildTsan(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const root_name = "tsan"; const output_mode = .Lib; diff --git a/src/libunwind.zig b/src/libunwind.zig index dabd8631b9e4..95c58936fa6d 100644 --- a/src/libunwind.zig +++ b/src/libunwind.zig @@ -17,7 +17,7 @@ pub fn buildStaticLib(comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const root_name = "unwind"; const output_mode = .Lib; diff --git a/src/link.zig b/src/link.zig index 0b191ca8daf8..79a1c634624c 100644 --- a/src/link.zig +++ b/src/link.zig @@ -628,7 +628,7 @@ pub const File = struct { var arena_allocator = std.heap.ArenaAllocator.init(base.allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type. 
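The crash handler above allocates out of a static buffer through FixedBufferAllocator, so a panicking process never re-enters a general-purpose heap. A minimal sketch of the same pattern; the buffer size, function name, and message are illustrative:

const std = @import("std");

// Illustrative stand-in for crash_heap: a fixed global scratch buffer.
var scratch: [16 * 1024]u8 = undefined;

fn reportCrash(decl_name: []const u8) !void {
    var fba = std.heap.FixedBufferAllocator.init(&scratch);
    const allocator = fba.allocator();

    // Formatting allocates only from scratch; nothing needs freeing on
    // this path, and OutOfMemory just means the buffer filled up.
    const msg = try std.fmt.allocPrint(allocator, "analysis of '{s}' failed\n", .{decl_name});
    try std.io.getStdErr().writeAll(msg);
}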
diff --git a/src/link/C.zig b/src/link/C.zig index 6bdace3fcaf3..6599008c737e 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -128,7 +128,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes .decl = decl, .fwd_decl = fwd_decl.toManaged(module.gpa), .typedefs = typedefs.promote(module.gpa), - .typedefs_arena = self.arena.getAllocator(), + .typedefs_arena = self.arena.allocator(), }, .code = code.toManaged(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code @@ -193,7 +193,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { .decl = decl, .fwd_decl = fwd_decl.toManaged(module.gpa), .typedefs = typedefs.promote(module.gpa), - .typedefs_arena = self.arena.getAllocator(), + .typedefs_arena = self.arena.allocator(), }, .code = code.toManaged(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 00bddfe578b8..2445b11caf0b 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -877,7 +877,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 200ca488984d..24f8a02b9522 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1243,7 +1243,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 5e0e76648351..db2b8ffc4291 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -412,7 +412,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. 
@@ -5379,7 +5379,7 @@ fn snapshotState(self: *MachO) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const out_file = try emit.directory.handle.createFile("snapshots.json", .{ .truncate = self.cold_start, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index bc7e4d71a414..7dfbee2a1ffb 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -168,7 +168,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void { try fn_map_res.value_ptr.functions.put(gpa, decl, out); } else { const file = decl.getFileScope(); - const arena = self.path_arena.getAllocator(); + const arena = self.path_arena.allocator(); // each file gets a symbol fn_map_res.value_ptr.* = .{ .sym_index = blk: { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 7ffd067596d6..367f3376f02c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -950,7 +950,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void { var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. diff --git a/src/link/tapi.zig b/src/link/tapi.zig index fe5ef2af9c9f..7a55a5104da5 100644 --- a/src/link/tapi.zig +++ b/src/link/tapi.zig @@ -120,7 +120,7 @@ pub const LibStub = struct { err: { log.debug("trying to parse as []TbdV4", .{}); const inner = lib_stub.yaml.parse([]TbdV4) catch break :err; - var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, inner.len); + var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len); for (inner) |doc, i| { out[i] = .{ .v4 = doc }; } @@ -130,7 +130,7 @@ pub const LibStub = struct { err: { log.debug("trying to parse as TbdV4", .{}); const inner = lib_stub.yaml.parse(TbdV4) catch break :err; - var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1); + var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, 1); out[0] = .{ .v4 = inner }; break :blk out; } @@ -148,7 +148,7 @@ pub const LibStub = struct { err: { log.debug("trying to parse as TbdV3", .{}); const inner = lib_stub.yaml.parse(TbdV3) catch break :err; - var out = try lib_stub.yaml.arena.getAllocator().alloc(Tbd, 1); + var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, 1); out[0] = .{ .v3 = inner }; break :blk out; } diff --git a/src/link/tapi/yaml.zig b/src/link/tapi/yaml.zig index 261caee717f1..7c1997604d30 100644 --- a/src/link/tapi/yaml.zig +++ b/src/link/tapi/yaml.zig @@ -248,7 +248,7 @@ pub const Yaml = struct { pub fn load(allocator: Allocator, source: []const u8) !Yaml { var arena = ArenaAllocator.init(allocator); - const arena_allocator = arena.getAllocator(); + const arena_allocator = arena.allocator(); var tree = Tree.init(arena_allocator); try tree.parse(source); @@ -300,7 +300,7 @@ pub const Yaml = struct { .Pointer => |info| { switch (info.size) { .Slice => { - var parsed = try self.arena.getAllocator().alloc(info.child, self.docs.items.len); + var parsed = try self.arena.allocator().alloc(info.child, self.docs.items.len); for (self.docs.items) |doc, i| { parsed[i] = try self.parseValue(info.child, doc); } @@ -362,7 +362,7 @@ pub const Yaml = struct { inline for (struct_info.fields) |field| { const value: ?Value = map.get(field.name) orelse blk: { - const field_name = try mem.replaceOwned(u8, 
self.arena.getAllocator(), field.name, "_", "-"); + const field_name = try mem.replaceOwned(u8, self.arena.allocator(), field.name, "_", "-"); break :blk map.get(field_name); }; @@ -383,7 +383,7 @@ pub const Yaml = struct { fn parsePointer(self: *Yaml, comptime T: type, value: Value) Error!T { const ptr_info = @typeInfo(T).Pointer; - const arena = self.arena.getAllocator(); + const arena = self.arena.allocator(); switch (ptr_info.size) { .Slice => { diff --git a/src/main.zig b/src/main.zig index c97415ff29e5..981a76a36411 100644 --- a/src/main.zig +++ b/src/main.zig @@ -139,7 +139,7 @@ pub fn main() anyerror!void { const gpa = gpa: { if (!builtin.link_libc) { gpa_need_deinit = true; - break :gpa general_purpose_allocator.getAllocator(); + break :gpa general_purpose_allocator.allocator(); } // We would prefer to use raw libc allocator here, but cannot // use it if it won't support the alignment we need. @@ -153,7 +153,7 @@ pub fn main() anyerror!void { }; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); - const arena = arena_instance.getAllocator(); + const arena = arena_instance.allocator(); const args = try process.argsAlloc(arena); @@ -3619,7 +3619,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void var errors = std.ArrayList(Compilation.AllErrors.Message).init(gpa); defer errors.deinit(); - try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file); + try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file); const ttyconf: std.debug.TTY.Config = switch (color) { .auto => std.debug.detectTTYConfig(), .on => .escape_codes, @@ -3818,7 +3818,7 @@ fn fmtPathFile( var errors = std.ArrayList(Compilation.AllErrors.Message).init(fmt.gpa); defer errors.deinit(); - try Compilation.AllErrors.addZir(arena_instance.getAllocator(), &errors, &file); + try Compilation.AllErrors.addZir(arena_instance.allocator(), &errors, &file); const ttyconf: std.debug.TTY.Config = switch (fmt.color) { .auto => std.debug.detectTTYConfig(), .on => .escape_codes, diff --git a/src/mingw.zig b/src/mingw.zig index 6f02ebf39525..264740c33358 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { } var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); switch (crt_file) { .crt2_o => { @@ -281,7 +281,7 @@ fn add_cc_args( pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const def_file_path = findDef(comp, arena, lib_name) catch |err| switch (err) { error.FileNotFound => { diff --git a/src/musl.zig b/src/musl.zig index 7c3957fdd744..cad6246c98c4 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -25,7 +25,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { const gpa = comp.gpa; var arena_allocator = std.heap.ArenaAllocator.init(gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); switch (crt_file) { .crti_o => { diff --git a/src/print_air.zig b/src/print_air.zig index ce53a26aeb78..3e503735b94a 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -47,7 +47,7 @@ pub fn dump(gpa: Allocator, air: Air, zir: Zir, 
liveness: Liveness) void { var writer: Writer = .{ .gpa = gpa, - .arena = arena.getAllocator(), + .arena = arena.allocator(), .air = air, .zir = zir, .liveness = liveness, diff --git a/src/print_zir.zig b/src/print_zir.zig index 996898b4ace2..401d41cd506a 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -19,7 +19,7 @@ pub fn renderAsTextToFile( var writer: Writer = .{ .gpa = gpa, - .arena = arena.getAllocator(), + .arena = arena.allocator(), .file = scope_file, .code = scope_file.zir, .indent = 0, @@ -74,7 +74,7 @@ pub fn renderInstructionContext( var writer: Writer = .{ .gpa = gpa, - .arena = arena.getAllocator(), + .arena = arena.allocator(), .file = scope_file, .code = scope_file.zir, .indent = if (indent < 2) 2 else indent, @@ -106,7 +106,7 @@ pub fn renderSingleInstruction( var writer: Writer = .{ .gpa = gpa, - .arena = arena.getAllocator(), + .arena = arena.allocator(), .file = scope_file, .code = scope_file.zir, .indent = indent, diff --git a/src/stage1.zig b/src/stage1.zig index 810dcc477b93..8e6090af0b97 100644 --- a/src/stage1.zig +++ b/src/stage1.zig @@ -38,7 +38,7 @@ pub fn main(argc: c_int, argv: [*][*:0]u8) callconv(.C) c_int { const gpa = std.heap.c_allocator; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); - const arena = arena_instance.getAllocator(); + const arena = arena_instance.allocator(); const args = arena.alloc([]const u8, @intCast(usize, argc)) catch fatal("{s}", .{"OutOfMemory"}); for (args) |*arg, i| { diff --git a/src/test.zig b/src/test.zig index 74147069e80f..44faea0ed962 100644 --- a/src/test.zig +++ b/src/test.zig @@ -692,7 +692,7 @@ pub const TestContext = struct { var arena_allocator = std.heap.ArenaAllocator.init(allocator); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); diff --git a/src/translate_c.zig b/src/translate_c.zig index 570059255004..03bb59469a48 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -373,7 +373,7 @@ pub fn translate( // from this function. 
var arena = std.heap.ArenaAllocator.init(gpa); errdefer arena.deinit(); - const arena_allocator = arena.getAllocator(); + const arena_allocator = arena.allocator(); var context = Context{ .gpa = gpa, diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig index b2235ad53ebe..fb60c98e5670 100644 --- a/src/wasi_libc.zig +++ b/src/wasi_libc.zig @@ -67,7 +67,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void { const gpa = comp.gpa; var arena_allocator = std.heap.ArenaAllocator.init(gpa); defer arena_allocator.deinit(); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); switch (crt_file) { .crt1_reactor_o => { diff --git a/test/cli.zig b/test/cli.zig index 20a2143f51c3..d4afe417ce14 100644 --- a/test/cli.zig +++ b/test/cli.zig @@ -16,7 +16,7 @@ pub fn main() !void { // skip my own exe name _ = arg_it.skip(); - a = arena.getAllocator(); + a = arena.allocator(); const zig_exe_rel = try (arg_it.next(a) orelse { std.debug.print("Expected first argument to be path to zig compiler\n", .{}); diff --git a/test/compare_output.zig b/test/compare_output.zig index 46cbdd77f6ee..8a0bfc1ac76d 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -491,7 +491,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\pub fn main() !void { \\ var allocator_buf: [10]u8 = undefined; \\ var fixedBufferAllocator = std.mem.validationWrap(std.heap.FixedBufferAllocator.init(&allocator_buf)); - \\ const allocator = std.heap.loggingAllocator(fixedBufferAllocator.getAllocator()).getAllocator(); + \\ const allocator = std.heap.loggingAllocator(fixedBufferAllocator.allocator()).allocator(); \\ \\ var a = try allocator.alloc(u8, 10); \\ a = allocator.shrink(a, 5); diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig index f0dea39ccbb0..6bc5501853a0 100644 --- a/test/standalone/brace_expansion/main.zig +++ b/test/standalone/brace_expansion/main.zig @@ -16,7 +16,7 @@ const Token = union(enum) { }; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; -const global_allocator = gpa.getAllocator(); +const global_allocator = gpa.allocator(); fn tokenize(input: []const u8) !ArrayList(Token) { const State = enum { diff --git a/test/standalone/cat/main.zig b/test/standalone/cat/main.zig index a8b16a05ca48..740e73a33e21 100644 --- a/test/standalone/cat/main.zig +++ b/test/standalone/cat/main.zig @@ -8,7 +8,7 @@ const warn = std.log.warn; pub fn main() !void { var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena_instance.deinit(); - const arena = arena_instance.getAllocator(); + const arena = arena_instance.allocator(); const args = try process.argsAlloc(arena); diff --git a/tools/gen_spirv_spec.zig b/tools/gen_spirv_spec.zig index e4ad6927b2c5..465404e5d183 100644 --- a/tools/gen_spirv_spec.zig +++ b/tools/gen_spirv_spec.zig @@ -4,7 +4,7 @@ const g = @import("spirv/grammar.zig"); pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const args = try std.process.argsAlloc(allocator); if (args.len != 2) { diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig index a99d14752f31..56ea7715a7c2 100644 --- a/tools/gen_stubs.zig +++ b/tools/gen_stubs.zig @@ -25,7 +25,7 @@ pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const ally = arena.getAllocator(); + const ally = arena.allocator(); 
var symbols = std.ArrayList(Symbol).init(ally); var sections = std.ArrayList([]const u8).init(ally); diff --git a/tools/merge_anal_dumps.zig b/tools/merge_anal_dumps.zig index 93e067460557..7c77e76a0256 100644 --- a/tools/merge_anal_dumps.zig +++ b/tools/merge_anal_dumps.zig @@ -9,7 +9,7 @@ pub fn main() anyerror!void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const args = try std.process.argsAlloc(allocator); diff --git a/tools/process_headers.zig b/tools/process_headers.zig index 1a90f99343eb..fea50b30dfa7 100644 --- a/tools/process_headers.zig +++ b/tools/process_headers.zig @@ -284,7 +284,7 @@ const LibCVendor = enum { pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const args = try std.process.argsAlloc(allocator); var search_paths = std.ArrayList([]const u8).init(allocator); var opt_out_dir: ?[]const u8 = null; diff --git a/tools/update-license-headers.zig b/tools/update-license-headers.zig index 83c47f1481fd..4e415784f8aa 100644 --- a/tools/update-license-headers.zig +++ b/tools/update-license-headers.zig @@ -10,7 +10,7 @@ pub fn main() !void { defer root_node.end(); var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const arena = arena_allocator.getAllocator(); + const arena = arena_allocator.allocator(); const args = try std.process.argsAlloc(arena); const path_to_walk = args[1]; diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig index 90a96e057294..7360f9656006 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -450,13 +450,8 @@ const cpu_targets = struct { pub fn main() anyerror!void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); -<<<<<<< HEAD - const allocator = &arena.allocator; -======= - - const allocator = arena.getAllocator(); ->>>>>>> 11157e318 (allocgate: stage 1 and 2 building) + const allocator = arena.allocator(); const args = try std.process.argsAlloc(allocator); if (args.len <= 1) { diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig index 70bc5a1c74ee..73c05d8cf197 100644 --- a/tools/update_cpu_features.zig +++ b/tools/update_cpu_features.zig @@ -769,7 +769,7 @@ const llvm_targets = [_]LlvmTarget{ pub fn main() anyerror!void { var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena_state.deinit(); - const arena = arena_state.getAllocator(); + const arena = arena_state.allocator(); const args = try std.process.argsAlloc(arena); if (args.len <= 1) { @@ -845,7 +845,7 @@ fn processOneTarget(job: Job) anyerror!void { var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena_state.deinit(); - const arena = arena_state.getAllocator(); + const arena = arena_state.allocator(); var progress_node = job.root_progress.start(llvm_target.zig_name, 3); progress_node.activate(); diff --git a/tools/update_glibc.zig b/tools/update_glibc.zig index 7cccb47e1cb7..e450f8c6d48a 100644 --- a/tools/update_glibc.zig +++ b/tools/update_glibc.zig @@ -133,7 +133,7 @@ const Function = struct { pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const args = try std.process.argsAlloc(allocator); const in_glibc_dir = args[1]; // 
path to the unzipped tarball of glibc, e.g. ~/downloads/glibc-2.25 const zig_src_dir = args[2]; // path to the source checkout of zig, lib dir, e.g. ~/zig-src/lib diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig index 0c6c570a317c..8972ab641c73 100644 --- a/tools/update_spirv_features.zig +++ b/tools/update_spirv_features.zig @@ -48,7 +48,7 @@ const Version = struct { pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); - const allocator = arena.getAllocator(); + const allocator = arena.allocator(); const args = try std.process.argsAlloc(allocator); From 80bbf234e0d31266c72bb6e93db2d2199ac5920d Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Fri, 29 Oct 2021 03:00:00 +0100 Subject: [PATCH 05/10] allocgate: fix failing tests --- lib/std/heap/logging_allocator.zig | 4 ++-- lib/std/mem.zig | 4 ++-- lib/std/testing/failing_allocator.zig | 4 ++-- test/compile_errors.zig | 6 +++--- test/standalone/brace_expansion/main.zig | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index da9e731fd5e8..698b37da6c4e 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -53,7 +53,7 @@ pub fn ScopedLoggingAllocator( len_align: u29, ra: usize, ) error{OutOfMemory}![]u8 { - const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra); + const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra); if (result) |_| { logHelper( success_log_level, @@ -78,7 +78,7 @@ pub fn ScopedLoggingAllocator( len_align: u29, ra: usize, ) error{OutOfMemory}!usize { - if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ra)) |resized_len| { + if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| { if (new_len == 0) { logHelper(success_log_level, "free - success - len: {}", .{buf.len}); } else if (new_len <= buf.len) { diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 37b11412722c..79c739ca6222 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -40,9 +40,9 @@ pub fn ValidationAllocator(comptime T: type) type { underlying_allocator: T, - pub fn init(allocator: T) @This() { + pub fn init(underlying_allocator: T) @This() { return .{ - .underlying_allocator = allocator, + .underlying_allocator = underlying_allocator, }; } diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 15da5091fbe1..35eef3a98cba 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -28,9 +28,9 @@ pub const FailingAllocator = struct { /// var a = try failing_alloc.create(i32); /// var b = try failing_alloc.create(i32); /// testing.expectError(error.OutOfMemory, failing_alloc.create(i32)); - pub fn init(allocator: mem.Allocator, fail_index: usize) FailingAllocator { + pub fn init(internal_allocator: mem.Allocator, fail_index: usize) FailingAllocator { return FailingAllocator{ - .internal_allocator = allocator, + .internal_allocator = internal_allocator, .fail_index = fail_index, .index = 0, .allocated_bytes = 0, diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 3ed47432754b..8dfb44cf80d7 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -6550,9 +6550,9 @@ pub fn addCases(ctx: *TestContext) !void { ctx.objErrStage1("method call with first 
arg type wrong container", \\pub const List = struct { \\ len: usize, - \\ allocator: Allocator, + \\ allocator: *Allocator, \\ - \\ pub fn init(allocator: Allocator) List { + \\ pub fn init(allocator: *Allocator) List { \\ return List { \\ .len = 0, \\ .allocator = allocator, @@ -6573,7 +6573,7 @@ pub fn addCases(ctx: *TestContext) !void { \\ x.init(); \\} , &[_][]const u8{ - "tmp.zig:23:5: error: expected type 'Allocator', found '*List'", + "tmp.zig:23:5: error: expected type '*Allocator', found '*List'", }); ctx.objErrStage1("binary not on number literal", diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig index 6bc5501853a0..54590c65b22e 100644 --- a/test/standalone/brace_expansion/main.zig +++ b/test/standalone/brace_expansion/main.zig @@ -16,7 +16,7 @@ const Token = union(enum) { }; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; -const global_allocator = gpa.allocator(); +var global_allocator = gpa.allocator(); fn tokenize(input: []const u8) !ArrayList(Token) { const State = enum { From 9377f32c089a925d7e6f1c64c1ce7777d108213c Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Fri, 29 Oct 2021 04:17:21 +0100 Subject: [PATCH 06/10] allocgate: utilize a *const vtable field --- lib/std/heap.zig | 34 ++++++--- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/general_purpose_allocator.zig | 2 +- lib/std/heap/log_to_writer_allocator.zig | 4 +- lib/std/heap/logging_allocator.zig | 4 +- lib/std/mem.zig | 18 +++-- lib/std/mem/Allocator.zig | 88 ++++++++++++---------- lib/std/testing/failing_allocator.zig | 4 +- 8 files changed, 90 insertions(+), 66 deletions(-) diff --git a/lib/std/heap.zig b/lib/std/heap.zig index c9a5062570a2..e005101c6b08 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -154,8 +154,11 @@ const CAllocator = struct { /// `malloc`/`free`, see `raw_c_allocator`. pub const c_allocator = Allocator{ .ptr = undefined, - .allocFn = CAllocator.alloc, - .resizeFn = CAllocator.resize, + .vtable = &c_allocator_vtable, +}; +const c_allocator_vtable = Allocator.VTable{ + .alloc = CAllocator.alloc, + .resize = CAllocator.resize, }; /// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls @@ -165,8 +168,11 @@ pub const c_allocator = Allocator{ /// than `c_allocator`. 
pub const raw_c_allocator = Allocator{ .ptr = undefined, - .allocFn = rawCAlloc, - .resizeFn = rawCResize, + .vtable = &raw_c_allocator_vtable, +}; +const raw_c_allocator_vtable = Allocator.VTable{ + .alloc = rawCAlloc, + .resize = rawCResize, }; fn rawCAlloc( @@ -208,16 +214,14 @@ fn rawCResize( pub const page_allocator = if (builtin.target.isWasm()) Allocator{ .ptr = undefined, - .allocFn = WasmPageAllocator.alloc, - .resizeFn = WasmPageAllocator.resize, + .vtable = &WasmPageAllocator.vtable, } else if (builtin.target.os.tag == .freestanding) root.os.heap.page_allocator else Allocator{ .ptr = undefined, - .allocFn = PageAllocator.alloc, - .resizeFn = PageAllocator.resize, + .vtable = &PageAllocator.vtable, }; /// Verifies that the adjusted length will still map to the full length @@ -231,6 +235,11 @@ pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize { pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null; const PageAllocator = struct { + const vtable = Allocator.VTable{ + .alloc = alloc, + .resize = resize, + }; + fn alloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { _ = ra; assert(n > 0); @@ -400,6 +409,11 @@ const WasmPageAllocator = struct { } } + const vtable = Allocator.VTable{ + .alloc = alloc, + .resize = resize, + }; + const PageStatus = enum(u1) { used = 0, free = 1, @@ -807,7 +821,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { return_address: usize, ) error{OutOfMemory}![]u8 { return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch - return self.fallback_allocator.allocFn(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address); + return self.fallback_allocator.vtable.alloc(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address); } fn resize( @@ -821,7 +835,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address); } else { - return self.fallback_allocator.resizeFn(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address); + return self.fallback_allocator.vtable.resize(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address); } } }; diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c5d7d5ec9db6..35e3f0ada29d 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -47,7 +47,7 @@ pub const ArenaAllocator = struct { const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16); const big_enough_len = prev_len + actual_min_size; const len = big_enough_len + big_enough_len / 2; - const buf = try self.child_allocator.allocFn(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress()); + const buf = try self.child_allocator.vtable.alloc(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress()); const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr)); buf_node.* = BufNode{ .data = buf, diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index 822db6fb1b96..2e9eab0fd209 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { var it = self.large_allocations.iterator(); while (it.next()) |large| { 
if (large.value_ptr.freed) { - _ = self.backing_allocator.resizeFn(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable; + _ = self.backing_allocator.vtable.resize(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable; } } } diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig index cab17243126b..5019a015bcba 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/heap/log_to_writer_allocator.zig @@ -29,7 +29,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { ra: usize, ) error{OutOfMemory}![]u8 { self.writer.print("alloc : {}", .{len}) catch {}; - const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra); + const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra); if (result) |_| { self.writer.print(" success!\n", .{}) catch {}; } else |_| { @@ -53,7 +53,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { } else { self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {}; } - if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| { + if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| { if (new_len > buf.len) { self.writer.print(" success!\n", .{}) catch {}; } diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 698b37da6c4e..21c3546b11eb 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -53,7 +53,7 @@ pub fn ScopedLoggingAllocator( len_align: u29, ra: usize, ) error{OutOfMemory}![]u8 { - const result = self.parent_allocator.allocFn(self.parent_allocator.ptr, len, ptr_align, len_align, ra); + const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra); if (result) |_| { logHelper( success_log_level, @@ -78,7 +78,7 @@ pub fn ScopedLoggingAllocator( len_align: u29, ra: usize, ) error{OutOfMemory}!usize { - if (self.parent_allocator.resizeFn(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| { + if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| { if (new_len == 0) { logHelper(success_log_level, "free - success - len: {}", .{buf.len}); } else if (new_len <= buf.len) { diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 79c739ca6222..0594873514ac 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -70,7 +70,7 @@ pub fn ValidationAllocator(comptime T: type) type { } const underlying = self.getUnderlyingAllocatorPtr(); - const result = try underlying.allocFn(underlying.ptr, n, ptr_align, len_align, ret_addr); + const result = try underlying.vtable.alloc(underlying.ptr, n, ptr_align, len_align, ret_addr); assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); if (len_align == 0) { assert(result.len == n); @@ -95,7 +95,7 @@ pub fn ValidationAllocator(comptime T: type) type { assert(new_len >= len_align); } const underlying = self.getUnderlyingAllocatorPtr(); - const result = try underlying.resizeFn(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr); + const result = try underlying.vtable.resize(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr); if (len_align == 0) { assert(result == new_len); } else 
{ @@ -131,10 +131,14 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize { return adjusted; } -const failAllocator = Allocator{ +const fail_allocator = Allocator{ .ptr = undefined, - .allocFn = failAllocatorAlloc, - .resizeFn = Allocator.NoResize(c_void).noResize, + .vtable = &failAllocator_vtable, +}; + +const failAllocator_vtable = Allocator.VTable{ + .alloc = failAllocatorAlloc, + .resize = Allocator.NoResize(c_void).noResize, }; fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 { @@ -146,8 +150,8 @@ fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: } test "mem.Allocator basics" { - try testing.expectError(error.OutOfMemory, failAllocator.alloc(u8, 1)); - try testing.expectError(error.OutOfMemory, failAllocator.allocSentinel(u8, 1, 0)); + try testing.expectError(error.OutOfMemory, fail_allocator.alloc(u8, 1)); + try testing.expectError(error.OutOfMemory, fail_allocator.allocSentinel(u8, 1, 0)); } test "Allocator.resize" { diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 8478120b003a..9e5957c9366b 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -10,39 +10,42 @@ pub const Error = error{OutOfMemory}; // The type erased pointer to the allocator implementation ptr: *c_void, - -/// Attempt to allocate at least `len` bytes aligned to `ptr_align`. -/// -/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, -/// otherwise, the length must be aligned to `len_align`. -/// -/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`. -/// -/// `ret_addr` is optionally provided as the first return address of the allocation call stack. -/// If the value is `0` it means no return address has been provided. -allocFn: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8, - -/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent -/// length returned by `allocFn` or `resizeFn`. `buf_align` must equal the same value -/// that was passed as the `ptr_align` parameter to the original `allocFn` call. -/// -/// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no -/// longer be passed to `resizeFn`. -/// -/// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`. -/// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be -/// unmodified and error.OutOfMemory MUST be returned. -/// -/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, -/// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not* -/// provide a way to modify the alignment of a pointer. Rather it provides an API for -/// accepting more bytes of memory from the allocator than requested. -/// -/// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`. -/// -/// `ret_addr` is optionally provided as the first return address of the allocation call stack. -/// If the value is `0` it means no return address has been provided. -resizeFn: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize, +vtable: *const VTable, + +pub const VTable = struct { + /// Attempt to allocate at least `len` bytes aligned to `ptr_align`. 
+ /// + /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, + /// otherwise, the length must be aligned to `len_align`. + /// + /// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`. + /// + /// `ret_addr` is optionally provided as the first return address of the allocation call stack. + /// If the value is `0` it means no return address has been provided. + alloc: fn (ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8, + + /// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent + /// length returned by `alloc` or `resize`. `buf_align` must equal the same value + /// that was passed as the `ptr_align` parameter to the original `alloc` call. + /// + /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no + /// longer be passed to `resize`. + /// + /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`. + /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be + /// unmodified and error.OutOfMemory MUST be returned. + /// + /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, + /// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not* + /// provide a way to modify the alignment of a pointer. Rather it provides an API for + /// accepting more bytes of memory from the allocator than requested. + /// + /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`. + /// + /// `ret_addr` is optionally provided as the first return address of the allocation call stack. + /// If the value is `0` it means no return address has been provided. + resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize, +}; pub fn init( pointer: anytype, @@ -64,11 +67,14 @@ pub fn init( return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr); } }; + const vtable = VTable{ + .alloc = gen.alloc, + .resize = gen.resize, + }; return .{ .ptr = pointer, - .allocFn = gen.alloc, - .resizeFn = gen.resize, + .vtable = &vtable, }; } @@ -141,7 +147,7 @@ fn reallocBytes( return_address: usize, ) Error![]u8 { if (old_mem.len == 0) { - const new_mem = try self.allocFn(self.ptr, new_byte_count, new_alignment, len_align, return_address); + const new_mem = try self.vtable.alloc(self.ptr, new_byte_count, new_alignment, len_align, return_address); // TODO: https://github.com/ziglang/zig/issues/4298 @memset(new_mem.ptr, undefined, new_byte_count); return new_mem; @@ -152,7 +158,7 @@ fn reallocBytes( const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address); return old_mem.ptr[0..shrunk_len]; } - if (self.resizeFn(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| { + if (self.vtable.resize(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| { assert(resized_len >= new_byte_count); // TODO: https://github.com/ziglang/zig/issues/4298 @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count); @@ -178,7 +184,7 @@ fn moveBytes( ) Error![]u8 { assert(old_mem.len > 0); assert(new_len > 0); - const new_mem = try self.allocFn(self.ptr, new_len, new_alignment, len_align, return_address); + const new_mem = try self.vtable.alloc(self.ptr, new_len, new_alignment, len_align, return_address); @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, 
old_mem.len)); // TODO https://github.com/ziglang/zig/issues/4298 @memset(old_mem.ptr, undefined, old_mem.len); @@ -320,7 +326,7 @@ pub fn allocAdvancedWithRetAddr( .exact => 0, .at_least => size_of_T, }; - const byte_slice = try self.allocFn(self.ptr, byte_count, a, len_align, return_address); + const byte_slice = try self.vtable.alloc(self.ptr, byte_count, a, len_align, return_address); switch (exact) { .exact => assert(byte_slice.len == byte_count), .at_least => assert(byte_slice.len >= byte_count), @@ -345,7 +351,7 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old } const old_byte_slice = mem.sliceAsBytes(old_mem); const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; - const rc = try self.resizeFn(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()); + const rc = try self.vtable.resize(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()); assert(rc == new_byte_count); const new_byte_slice = old_byte_slice.ptr[0..new_byte_count]; return mem.bytesAsSlice(T, new_byte_slice); @@ -514,5 +520,5 @@ pub fn shrinkBytes( return_address: usize, ) usize { assert(new_len <= buf.len); - return self.resizeFn(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable; + return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable; } diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 35eef3a98cba..c26163a6f3d7 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -54,7 +54,7 @@ pub const FailingAllocator = struct { if (self.index == self.fail_index) { return error.OutOfMemory; } - const result = try self.internal_allocator.allocFn(self.internal_allocator.ptr, len, ptr_align, len_align, return_address); + const result = try self.internal_allocator.vtable.alloc(self.internal_allocator.ptr, len, ptr_align, len_align, return_address); self.allocated_bytes += result.len; self.allocations += 1; self.index += 1; @@ -69,7 +69,7 @@ pub const FailingAllocator = struct { len_align: u29, ra: usize, ) error{OutOfMemory}!usize { - const r = self.internal_allocator.resizeFn(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| { + const r = self.internal_allocator.vtable.resize(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| { std.debug.assert(new_len > old_mem.len); return e; }; From 02e5e0ba1fec38a3f8ed20e24219966f944a4ec2 Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Sun, 31 Oct 2021 21:45:27 +0000 Subject: [PATCH 07/10] allocgate: apply missed changes --- doc/langref.html.in | 2 +- lib/std/heap/general_purpose_allocator.zig | 14 ++++++++------ lib/std/mem/Allocator.zig | 6 +++--- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index b2f211468eae..117c533f4059 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -10200,7 +10200,7 @@ test "string literal to constant slice" { {#header_open|Implementing an Allocator#}

    Zig programmers can implement their own allocators by fulfilling the Allocator interface. In order to do this one must read carefully the documentation comments in std/mem.zig and - then supply a {#syntax#}reallocFn{#endsyntax#} and a {#syntax#}shrinkFn{#endsyntax#}. + then supply an {#syntax#}allocFn{#endsyntax#} and a {#syntax#}resizeFn{#endsyntax#}.
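    For a concrete sense of the shape this takes after this change, a minimal sketch follows (illustrative only: the BumpAllocator name and its fixed-buffer strategy are invented here, while the allocFn/resizeFn signatures follow the std/mem/Allocator.zig hunks in this series):

        const std = @import("std");

        /// Illustrative bump allocator: hands out slices from a fixed buffer and never
        /// reclaims memory. At this point in the series, a resize to 0 still means free.
        const BumpAllocator = struct {
            buffer: []u8,
            end_index: usize = 0,

            pub fn allocator(self: *BumpAllocator) std.mem.Allocator {
                return std.mem.Allocator.init(self, allocFn, resizeFn);
            }

            fn allocFn(self: *BumpAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
                _ = len_align;
                _ = ret_addr;
                // Simplification: assumes buffer.ptr itself is sufficiently aligned,
                // so aligning the index is enough.
                const start = std.mem.alignForward(self.end_index, ptr_align);
                if (start + len > self.buffer.len) return error.OutOfMemory;
                self.end_index = start + len;
                return self.buffer[start..self.end_index];
            }

            fn resizeFn(self: *BumpAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
                _ = buf_align;
                _ = len_align;
                _ = ret_addr;
                // Shrinks (including new_len == 0, i.e. free) always succeed; the bump
                // allocator simply forgets the memory. Growing in place is unsupported,
                // which is the only case allowed to return error.OutOfMemory.
                if (new_len <= buf.len) return new_len;
                return error.OutOfMemory;
            }
        };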

    There are many example allocators to look at for inspiration. Look at std/heap.zig and diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index 2e9eab0fd209..fa2536cfaa2f 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -555,7 +555,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { // Do memory limit accounting with requested sizes rather than what backing_allocator returns // because if we want to return error.OutOfMemory, we have to leave allocation untouched, and - // that is impossible to guarantee after calling backing_allocator.resizeFn. + // that is impossible to guarantee after calling backing_allocator.vtable.resize. const prev_req_bytes = self.total_requested_bytes; if (config.enable_memory_limit) { const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size; @@ -571,7 +571,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const result_len = if (config.never_unmap and new_size == 0) 0 else - try self.backing_allocator.resizeFn(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr); + try self.backing_allocator.vtable.resize(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr); if (config.enable_memory_limit) { entry.value_ptr.requested_size = new_size; @@ -764,7 +764,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const new_aligned_size = math.max(len, ptr_align); if (new_aligned_size > largest_bucket_object_size) { try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1); - const slice = try self.backing_allocator.allocFn(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr); + const slice = try self.backing_allocator.vtable.alloc(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr); const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr)); if (config.retain_metadata and !config.never_unmap) { @@ -1191,10 +1191,12 @@ test "double frees" { test "bug 9995 fix, large allocs count requested size not backing size" { // with AtLeast, buffer likely to be larger than requested, especially when shrinking var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; - var buf = try gpa.allocator.allocAdvanced(u8, 1, page_size + 1, .at_least); + const allocator = gpa.allocator(); + + var buf = try allocator.allocAdvanced(u8, 1, page_size + 1, .at_least); try std.testing.expect(gpa.total_requested_bytes == page_size + 1); - buf = try gpa.allocator.reallocAtLeast(buf, 1); + buf = try allocator.reallocAtLeast(buf, 1); try std.testing.expect(gpa.total_requested_bytes == 1); - buf = try gpa.allocator.reallocAtLeast(buf, 2); + buf = try allocator.reallocAtLeast(buf, 2); try std.testing.expect(gpa.total_requested_bytes == 2); } diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 9e5957c9366b..df3974f795ec 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -108,7 +108,7 @@ pub fn NoResize(comptime AllocatorType: type) type { /// When the size/alignment is less than or equal to the previous allocation, /// this function returns `error.OutOfMemory` when the allocator decides the client /// would be better off keeping the extra alignment/size. 
Clients will call -/// `resizeFn` when they require the allocator to track a new alignment/size, +/// `vtable.resize` when they require the allocator to track a new alignment/size, /// and so this function should only return success when the allocator considers /// the reallocation desirable from the allocator's perspective. /// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle @@ -124,7 +124,7 @@ pub fn NoResize(comptime AllocatorType: type) type { fn reallocBytes( self: Allocator, /// Guaranteed to be the same as what was returned from most recent call to - /// `allocFn` or `resizeFn`. + /// `vtable.alloc` or `vtable.resize`. /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count` /// is guaranteed to be >= 1. old_mem: []u8, @@ -507,7 +507,7 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T { return new_buf[0..m.len :0]; } -/// Call `resizeFn`, but caller guarantees that `new_len` <= `buf.len` meaning +/// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning /// error.OutOfMemory should be impossible. /// This function allows a runtime `buf_align` value. Callers should generally prefer /// to call `shrink` directly. From 23866b1f81010277b204d6f3f5db23d020a76400 Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Wed, 3 Nov 2021 12:49:31 +0000 Subject: [PATCH 08/10] allocgate: update code to use new interface --- lib/std/heap/general_purpose_allocator.zig | 2 +- src/tracy.zig | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index fa2536cfaa2f..be7651980cfb 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -1192,7 +1192,7 @@ test "bug 9995 fix, large allocs count requested size not backing size" { // with AtLeast, buffer likely to be larger than requested, especially when shrinking var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){}; const allocator = gpa.allocator(); - + var buf = try allocator.allocAdvanced(u8, 1, page_size + 1, .at_least); try std.testing.expect(gpa.total_requested_bytes == page_size + 1); buf = try allocator.reallocAtLeast(buf, 1); diff --git a/src/tracy.zig b/src/tracy.zig index 8abd78110fd7..83e31e57640d 100644 --- a/src/tracy.zig +++ b/src/tracy.zig @@ -109,11 +109,14 @@ pub fn tracyAllocator(allocator: std.mem.Allocator) TracyAllocator(null) { pub fn TracyAllocator(comptime name: ?[:0]const u8) type { return struct { - allocator: std.mem.Allocator, parent_allocator: std.mem.Allocator, const Self = @This(); + pub fn allocator(self: *Self) std.mem.Allocator { + return std.mem.Allocator.init(self, allocFn, resizeFn); + } + pub fn init(allocator: std.mem.Allocator) Self { return .{ .parent_allocator = allocator, @@ -124,8 +127,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { }; } - fn allocFn(allocator: std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 { - const self = @fieldParentPtr(Self, "allocator", allocator); + fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 { const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ret_addr); if (result) |data| { if (data.len != 0) { @@ -141,9 +143,7 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { return result; } - fn resizeFn(allocator: 
std.mem.Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize { - const self = @fieldParentPtr(Self, "allocator", allocator); - + fn resizeFn(self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize { if (self.parent_allocator.resizeFn(self.parent_allocator, buf, buf_align, new_len, len_align, ret_addr)) |resized_len| { // this condition is to handle free being called on an empty slice that was never even allocated // example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}` From f68cda738ad0d3e9bc0f328befad301d9e23756e Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Sat, 6 Nov 2021 00:54:35 +0000 Subject: [PATCH 09/10] allocgate: split free out from resize --- lib/std/heap.zig | 141 +++++++++--- lib/std/heap/arena_allocator.zig | 16 +- lib/std/heap/general_purpose_allocator.zig | 245 ++++++++++++++------- lib/std/heap/log_to_writer_allocator.zig | 20 +- lib/std/heap/logging_allocator.zig | 20 +- lib/std/mem.zig | 20 +- lib/std/mem/Allocator.zig | 114 ++++++++-- lib/std/testing/failing_allocator.zig | 22 +- src/tracy.zig | 20 +- 9 files changed, 458 insertions(+), 160 deletions(-) diff --git a/lib/std/heap.zig b/lib/std/heap.zig index e005101c6b08..2d3a96676d45 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -132,10 +132,6 @@ const CAllocator = struct { ) Allocator.Error!usize { _ = buf_align; _ = return_address; - if (new_len == 0) { - alignedFree(buf.ptr); - return 0; - } if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } @@ -147,6 +143,17 @@ const CAllocator = struct { } return error.OutOfMemory; } + + fn free( + _: *c_void, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + alignedFree(buf.ptr); + } }; /// Supports the full Allocator interface, including alignment, and exploiting @@ -159,6 +166,7 @@ pub const c_allocator = Allocator{ const c_allocator_vtable = Allocator.VTable{ .alloc = CAllocator.alloc, .resize = CAllocator.resize, + .free = CAllocator.free, }; /// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls @@ -173,6 +181,7 @@ pub const raw_c_allocator = Allocator{ const raw_c_allocator_vtable = Allocator.VTable{ .alloc = rawCAlloc, .resize = rawCResize, + .free = rawCFree, }; fn rawCAlloc( @@ -199,16 +208,23 @@ fn rawCResize( ) Allocator.Error!usize { _ = old_align; _ = ret_addr; - if (new_len == 0) { - c.free(buf.ptr); - return 0; - } if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } return error.OutOfMemory; } +fn rawCFree( + _: *c_void, + buf: []u8, + old_align: u29, + ret_addr: usize, +) void { + _ = old_align; + _ = ret_addr; + c.free(buf.ptr); +} + /// This allocator makes a syscall directly for every allocation and free. /// Thread-safe and lock-free. pub const page_allocator = if (builtin.target.isWasm()) @@ -238,6 +254,7 @@ const PageAllocator = struct { const vtable = Allocator.VTable{ .alloc = alloc, .resize = resize, + .free = free, }; fn alloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 { @@ -351,16 +368,6 @@ const PageAllocator = struct { if (builtin.os.tag == .windows) { const w = os.windows; - if (new_size == 0) { - // From the docs: - // "If the dwFreeType parameter is MEM_RELEASE, this parameter - // must be 0 (zero). 
The function frees the entire region that - // is reserved in the initial allocation call to VirtualAlloc." - // So we can only use MEM_RELEASE when actually releasing the - // whole allocation. - w.VirtualFree(buf_unaligned.ptr, 0, w.MEM_RELEASE); - return 0; - } if (new_size <= buf_unaligned.len) { const base_addr = @ptrToInt(buf_unaligned.ptr); const old_addr_end = base_addr + buf_unaligned.len; @@ -391,8 +398,6 @@ const PageAllocator = struct { const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned); // TODO: if the next_mmap_addr_hint is within the unmapped range, update it os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]); - if (new_size_aligned == 0) - return 0; return alignPageAllocLen(new_size_aligned, new_size, len_align); } @@ -400,6 +405,19 @@ const PageAllocator = struct { // TODO: if the next_mmap_addr_hint is within the remapped range, update it return error.OutOfMemory; } + + fn free(_: *c_void, buf_unaligned: []u8, buf_align: u29, return_address: usize) void { + _ = buf_align; + _ = return_address; + + if (builtin.os.tag == .windows) { + os.windows.VirtualFree(buf_unaligned.ptr, 0, os.windows.MEM_RELEASE); + } else { + const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); + const ptr = @alignCast(mem.page_size, buf_unaligned.ptr); + os.munmap(ptr[0..buf_aligned_len]); + } + } }; const WasmPageAllocator = struct { @@ -412,6 +430,7 @@ const WasmPageAllocator = struct { const vtable = Allocator.VTable{ .alloc = alloc, .resize = resize, + .free = free, }; const PageStatus = enum(u1) { @@ -571,7 +590,21 @@ const WasmPageAllocator = struct { const base = nPages(@ptrToInt(buf.ptr)); freePages(base + new_n, base + current_n); } - return if (new_len == 0) 0 else alignPageAllocLen(new_n * mem.page_size, new_len, len_align); + return alignPageAllocLen(new_n * mem.page_size, new_len, len_align); + } + + fn free( + _: *c_void, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + const aligned_len = mem.alignForward(buf.len, mem.page_size); + const current_n = nPages(aligned_len); + const base = nPages(@ptrToInt(buf.ptr)); + freePages(base, base + current_n); } }; @@ -588,7 +621,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { } pub fn allocator(self: *HeapAllocator) Allocator { - return Allocator.init(self, alloc, resize); + return Allocator.init(self, alloc, resize, free); } pub fn deinit(self: *HeapAllocator) void { @@ -644,10 +677,6 @@ pub const HeapAllocator = switch (builtin.os.tag) { ) error{OutOfMemory}!usize { _ = buf_align; _ = return_address; - if (new_size == 0) { - os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*)); - return 0; - } const root_addr = getRecordPtr(buf).*; const align_offset = @ptrToInt(buf.ptr) - root_addr; @@ -669,6 +698,17 @@ pub const HeapAllocator = switch (builtin.os.tag) { getRecordPtr(buf.ptr[0..return_len]).* = root_addr; return return_len; } + + fn free( + self: *HeapAllocator, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*)); + } }, else => @compileError("Unsupported OS"), }; @@ -696,13 +736,18 @@ pub const FixedBufferAllocator = struct { /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe pub fn allocator(self: *FixedBufferAllocator) Allocator { - return Allocator.init(self, alloc, resize); + return 
Allocator.init(self, alloc, resize, free); } /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator` /// *WARNING* using this at the same time as the interface returned by `getAllocator` is not thread safe pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator { - return Allocator.init(self, threadSafeAlloc, Allocator.NoResize(FixedBufferAllocator).noResize); + return Allocator.init( + self, + threadSafeAlloc, + Allocator.NoResize(FixedBufferAllocator).noResize, + Allocator.NoOpFree(FixedBufferAllocator).noOpFree, + ); } pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool { @@ -715,7 +760,7 @@ pub const FixedBufferAllocator = struct { /// NOTE: this will not work in all cases, if the last allocation had an adjusted_index /// then we won't be able to determine what the last allocation was. This is because - /// the alignForward operation done in alloc is not reverisible. + /// the alignForward operation done in alloc is not reversible. pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool { return buf.ptr + buf.len == self.buffer.ptr + self.end_index; } @@ -751,13 +796,13 @@ pub const FixedBufferAllocator = struct { if (!self.isLastAllocation(buf)) { if (new_size > buf.len) return error.OutOfMemory; - return if (new_size == 0) 0 else mem.alignAllocLen(buf.len, new_size, len_align); + return mem.alignAllocLen(buf.len, new_size, len_align); } if (new_size <= buf.len) { const sub = buf.len - new_size; self.end_index -= sub; - return if (new_size == 0) 0 else mem.alignAllocLen(buf.len - sub, new_size, len_align); + return mem.alignAllocLen(buf.len - sub, new_size, len_align); } const add = new_size - buf.len; @@ -768,6 +813,21 @@ pub const FixedBufferAllocator = struct { return new_size; } + fn free( + self: *FixedBufferAllocator, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + _ = buf_align; + _ = return_address; + assert(self.ownsSlice(buf)); // sanity check + + if (self.isLastAllocation(buf)) { + self.end_index -= buf.len; + } + } + fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 { _ = len_align; _ = ra; @@ -810,7 +870,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { /// WARNING: This functions both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator pub fn get(self: *Self) Allocator { self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]); - return Allocator.init(self, alloc, resize); + return Allocator.init(self, alloc, resize, free); } fn alloc( @@ -821,7 +881,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { return_address: usize, ) error{OutOfMemory}![]u8 { return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch - return self.fallback_allocator.vtable.alloc(self.fallback_allocator.ptr, len, ptr_align, len_align, return_address); + return self.fallback_allocator.rawAlloc(len, ptr_align, len_align, return_address); } fn resize( @@ -835,7 +895,20 @@ pub fn StackFallbackAllocator(comptime size: usize) type { if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address); } else { - return self.fallback_allocator.vtable.resize(self.fallback_allocator.ptr, buf, buf_align, new_len, len_align, return_address); + return self.fallback_allocator.rawResize(buf, buf_align, new_len, 
len_align, return_address); + } + } + + fn free( + self: *Self, + buf: []u8, + buf_align: u29, + return_address: usize, + ) void { + if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { + return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, buf_align, return_address); + } else { + return self.fallback_allocator.rawFree(buf, buf_align, return_address); } } }; diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index 35e3f0ada29d..c5a8d6bc7ea5 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -24,7 +24,7 @@ pub const ArenaAllocator = struct { }; pub fn allocator(self: *ArenaAllocator) Allocator { - return Allocator.init(self, alloc, resize); + return Allocator.init(self, alloc, resize, free); } const BufNode = std.SinglyLinkedList([]u8).Node; @@ -47,7 +47,7 @@ pub const ArenaAllocator = struct { const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16); const big_enough_len = prev_len + actual_min_size; const len = big_enough_len + big_enough_len / 2; - const buf = try self.child_allocator.vtable.alloc(self.child_allocator.ptr, len, @alignOf(BufNode), 1, @returnAddress()); + const buf = try self.child_allocator.rawAlloc(len, @alignOf(BufNode), 1, @returnAddress()); const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr)); buf_node.* = BufNode{ .data = buf, @@ -111,4 +111,16 @@ pub const ArenaAllocator = struct { return error.OutOfMemory; } } + + fn free(self: *ArenaAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void { + _ = buf_align; + _ = ret_addr; + + const cur_node = self.state.buffer_list.first orelse return; + const cur_buf = cur_node.data[@sizeOf(BufNode)..]; + + if (@ptrToInt(cur_buf.ptr) + self.state.end_index == @ptrToInt(buf.ptr) + buf.len) { + self.state.end_index -= buf.len; + } + } }; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index be7651980cfb..5687b1efb274 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -281,7 +281,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { }; pub fn allocator(self: *Self) Allocator { - return Allocator.init(self, alloc, resize); + return Allocator.init(self, alloc, resize, free); } fn bucketStackTrace( @@ -388,7 +388,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { var it = self.large_allocations.iterator(); while (it.next()) |large| { if (large.value_ptr.freed) { - _ = self.backing_allocator.vtable.resize(self.backing_allocator.ptr, large.value_ptr.bytes, large.value_ptr.ptr_align, 0, 0, @returnAddress()) catch unreachable; + self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.ptr_align, @returnAddress()); } } } @@ -529,9 +529,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (config.retain_metadata and entry.value_ptr.freed) { if (config.safety) { reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free)); - // Recoverable if this is a free. - if (new_size == 0) - return @as(usize, 0); @panic("Unrecoverable double free"); } else { unreachable; @@ -555,7 +552,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { // Do memory limit accounting with requested sizes rather than what backing_allocator returns // because if we want to return error.OutOfMemory, we have to leave allocation untouched, and - // that is impossible to guarantee after calling backing_allocator.vtable.resize. 
+ // that is impossible to guarantee after calling backing_allocator.rawResize. const prev_req_bytes = self.total_requested_bytes; if (config.enable_memory_limit) { const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size; @@ -568,29 +565,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.total_requested_bytes = prev_req_bytes; }; - const result_len = if (config.never_unmap and new_size == 0) - 0 - else - try self.backing_allocator.vtable.resize(self.backing_allocator.ptr, old_mem, old_align, new_size, len_align, ret_addr); + const result_len = try self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr); if (config.enable_memory_limit) { entry.value_ptr.requested_size = new_size; } - if (result_len == 0) { - if (config.verbose_log) { - log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); - } - - if (!config.retain_metadata) { - assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr))); - } else { - entry.value_ptr.freed = true; - entry.value_ptr.captureStackTrace(ret_addr, .free); - } - return 0; - } - if (config.verbose_log) { log.info("large resize {d} bytes at {*} to {d}", .{ old_mem.len, old_mem.ptr, new_size, @@ -601,6 +581,64 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return result_len; } + /// This function assumes the object is in the large object storage regardless + /// of the parameters. + fn freeLarge( + self: *Self, + old_mem: []u8, + old_align: u29, + ret_addr: usize, + ) void { + _ = old_align; + + const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse { + if (config.safety) { + @panic("Invalid free"); + } else { + unreachable; + } + }; + + if (config.retain_metadata and entry.value_ptr.freed) { + if (config.safety) { + reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free)); + return; + } else { + unreachable; + } + } + + if (config.safety and old_mem.len != entry.value_ptr.bytes.len) { + var addresses: [stack_n]usize = [1]usize{0} ** stack_n; + var free_stack_trace = StackTrace{ + .instruction_addresses = &addresses, + .index = 0, + }; + std.debug.captureStackTrace(ret_addr, &free_stack_trace); + log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {s} Free: {s}", .{ + entry.value_ptr.bytes.len, + old_mem.len, + entry.value_ptr.getStackTrace(.alloc), + free_stack_trace, + }); + } + + if (config.enable_memory_limit) { + self.total_requested_bytes -= entry.value_ptr.requested_size; + } + + if (config.verbose_log) { + log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); + } + + if (!config.retain_metadata) { + assert(self.large_allocations.remove(@ptrToInt(old_mem.ptr))); + } else { + entry.value_ptr.freed = true; + entry.value_ptr.captureStackTrace(ret_addr, .free); + } + } + pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void { self.requested_memory_limit = limit; } @@ -656,9 +694,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (!is_used) { if (config.safety) { reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); - // Recoverable if this is a free. 
- if (new_size == 0) - return @as(usize, 0); @panic("Unrecoverable double free"); } else { unreachable; @@ -678,52 +713,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.total_requested_bytes = prev_req_bytes; }; - if (new_size == 0) { - // Capture stack trace to be the "first free", in case a double free happens. - bucket.captureStackTrace(ret_addr, size_class, slot_index, .free); - - used_byte.* &= ~(@as(u8, 1) << used_bit_index); - bucket.used_count -= 1; - if (bucket.used_count == 0) { - if (bucket.next == bucket) { - // it's the only bucket and therefore the current one - self.buckets[bucket_index] = null; - } else { - bucket.next.prev = bucket.prev; - bucket.prev.next = bucket.next; - self.buckets[bucket_index] = bucket.prev; - } - if (!config.never_unmap) { - self.backing_allocator.free(bucket.page[0..page_size]); - } - if (!config.retain_metadata) { - self.freeBucket(bucket, size_class); - } else { - // move alloc_cursor to end so we can tell size_class later - const slot_count = @divExact(page_size, size_class); - bucket.alloc_cursor = @truncate(SlotIndex, slot_count); - if (self.empty_buckets) |prev_bucket| { - // empty_buckets is ordered newest to oldest through prev so that if - // config.never_unmap is false and backing_allocator reuses freed memory - // then searchBuckets will always return the newer, relevant bucket - bucket.prev = prev_bucket; - bucket.next = prev_bucket.next; - prev_bucket.next = bucket; - bucket.next.prev = bucket; - } else { - bucket.prev = bucket; - bucket.next = bucket; - } - self.empty_buckets = bucket; - } - } else { - @memset(old_mem.ptr, undefined, old_mem.len); - } - if (config.verbose_log) { - log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); - } - return @as(usize, 0); - } const new_aligned_size = math.max(new_size, old_align); const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size); if (new_size_class <= size_class) { @@ -740,6 +729,114 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return error.OutOfMemory; } + fn free( + self: *Self, + old_mem: []u8, + old_align: u29, + ret_addr: usize, + ) void { + const held = self.mutex.acquire(); + defer held.release(); + + assert(old_mem.len != 0); + + const aligned_size = math.max(old_mem.len, old_align); + if (aligned_size > largest_bucket_object_size) { + self.freeLarge(old_mem, old_align, ret_addr); + return; + } + const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size); + + var bucket_index = math.log2(size_class_hint); + var size_class: usize = size_class_hint; + const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) { + if (searchBucket(self.buckets[bucket_index], @ptrToInt(old_mem.ptr))) |bucket| { + // move bucket to head of list to optimize search for nearby allocations + self.buckets[bucket_index] = bucket; + break bucket; + } + size_class *= 2; + } else blk: { + if (config.retain_metadata) { + if (!self.large_allocations.contains(@ptrToInt(old_mem.ptr))) { + // object not in active buckets or a large allocation, so search empty buckets + if (searchBucket(self.empty_buckets, @ptrToInt(old_mem.ptr))) |bucket| { + // bucket is empty so is_used below will always be false and we exit there + break :blk bucket; + } else { + @panic("Invalid free"); + } + } + } + self.freeLarge(old_mem, old_align, ret_addr); + return; + }; + const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page); + const slot_index = @intCast(SlotIndex, byte_offset / size_class); + const 
used_byte_index = slot_index / 8; + const used_bit_index = @intCast(u3, slot_index % 8); + const used_byte = bucket.usedBits(used_byte_index); + const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; + if (!is_used) { + if (config.safety) { + reportDoubleFree(ret_addr, bucketStackTrace(bucket, size_class, slot_index, .alloc), bucketStackTrace(bucket, size_class, slot_index, .free)); + // Recoverable if this is a free. + return; + } else { + unreachable; + } + } + + // Definitely an in-use small alloc now. + if (config.enable_memory_limit) { + self.total_requested_bytes -= old_mem.len; + } + + // Capture stack trace to be the "first free", in case a double free happens. + bucket.captureStackTrace(ret_addr, size_class, slot_index, .free); + + used_byte.* &= ~(@as(u8, 1) << used_bit_index); + bucket.used_count -= 1; + if (bucket.used_count == 0) { + if (bucket.next == bucket) { + // it's the only bucket and therefore the current one + self.buckets[bucket_index] = null; + } else { + bucket.next.prev = bucket.prev; + bucket.prev.next = bucket.next; + self.buckets[bucket_index] = bucket.prev; + } + if (!config.never_unmap) { + self.backing_allocator.free(bucket.page[0..page_size]); + } + if (!config.retain_metadata) { + self.freeBucket(bucket, size_class); + } else { + // move alloc_cursor to end so we can tell size_class later + const slot_count = @divExact(page_size, size_class); + bucket.alloc_cursor = @truncate(SlotIndex, slot_count); + if (self.empty_buckets) |prev_bucket| { + // empty_buckets is ordered newest to oldest through prev so that if + // config.never_unmap is false and backing_allocator reuses freed memory + // then searchBuckets will always return the newer, relevant bucket + bucket.prev = prev_bucket; + bucket.next = prev_bucket.next; + prev_bucket.next = bucket; + bucket.next.prev = bucket; + } else { + bucket.prev = bucket; + bucket.next = bucket; + } + self.empty_buckets = bucket; + } + } else { + @memset(old_mem.ptr, undefined, old_mem.len); + } + if (config.verbose_log) { + log.info("small free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr }); + } + } + // Returns true if an allocation of `size` bytes is within the specified // limits if enable_memory_limit is true fn isAllocationAllowed(self: *Self, size: usize) bool { @@ -764,7 +861,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { const new_aligned_size = math.max(len, ptr_align); if (new_aligned_size > largest_bucket_object_size) { try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1); - const slice = try self.backing_allocator.vtable.alloc(self.backing_allocator.ptr, len, ptr_align, len_align, ret_addr); + const slice = try self.backing_allocator.rawAlloc(len, ptr_align, len_align, ret_addr); const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr)); if (config.retain_metadata and !config.never_unmap) { diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig index 5019a015bcba..fa8c19e0a068 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/heap/log_to_writer_allocator.zig @@ -18,7 +18,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { } pub fn allocator(self: *Self) Allocator { - return Allocator.init(self, alloc, resize); + return Allocator.init(self, alloc, resize, free); } fn alloc( @@ -29,7 +29,7 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { ra: usize, ) error{OutOfMemory}![]u8 { self.writer.print("alloc : {}", .{len}) catch {}; - const result = 
self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra); + const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra); if (result) |_| { self.writer.print(" success!\n", .{}) catch {}; } else |_| { @@ -46,14 +46,12 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { len_align: u29, ra: usize, ) error{OutOfMemory}!usize { - if (new_len == 0) { - self.writer.print("free : {}\n", .{buf.len}) catch {}; - } else if (new_len <= buf.len) { + if (new_len <= buf.len) { self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {}; } else { self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {}; } - if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| { + if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| { if (new_len > buf.len) { self.writer.print(" success!\n", .{}) catch {}; } @@ -64,6 +62,16 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { return e; } } + + fn free( + self: *Self, + buf: []u8, + buf_align: u29, + ra: usize, + ) void { + self.writer.print("free : {}\n", .{buf.len}) catch {}; + self.parent_allocator.rawFree(buf, buf_align, ra); + } }; } diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index 21c3546b11eb..b631cd0b1b7e 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -33,7 +33,7 @@ pub fn ScopedLoggingAllocator( } pub fn allocator(self: *Self) Allocator { - return Allocator.init(self, alloc, resize); + return Allocator.init(self, alloc, resize, free); } // This function is required as the `std.log.log` function is not public @@ -53,7 +53,7 @@ pub fn ScopedLoggingAllocator( len_align: u29, ra: usize, ) error{OutOfMemory}![]u8 { - const result = self.parent_allocator.vtable.alloc(self.parent_allocator.ptr, len, ptr_align, len_align, ra); + const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra); if (result) |_| { logHelper( success_log_level, @@ -78,10 +78,8 @@ pub fn ScopedLoggingAllocator( len_align: u29, ra: usize, ) error{OutOfMemory}!usize { - if (self.parent_allocator.vtable.resize(self.parent_allocator.ptr, buf, buf_align, new_len, len_align, ra)) |resized_len| { - if (new_len == 0) { - logHelper(success_log_level, "free - success - len: {}", .{buf.len}); - } else if (new_len <= buf.len) { + if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| { + if (new_len <= buf.len) { logHelper( success_log_level, "shrink - success - {} to {}, len_align: {}, buf_align: {}", @@ -106,6 +104,16 @@ pub fn ScopedLoggingAllocator( return err; } } + + fn free( + self: *Self, + buf: []u8, + buf_align: u29, + ra: usize, + ) void { + self.parent_allocator.rawFree(buf, buf_align, ra); + logHelper(success_log_level, "free - len: {}", .{buf.len}); + } }; } diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 0594873514ac..9c7ce3867b31 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -47,7 +47,7 @@ pub fn ValidationAllocator(comptime T: type) type { } pub fn allocator(self: *Self) Allocator { - return Allocator.init(self, alloc, resize); + return Allocator.init(self, alloc, resize, free); } fn getUnderlyingAllocatorPtr(self: *Self) Allocator { @@ -70,7 +70,7 @@ pub fn ValidationAllocator(comptime T: type) type { } const underlying = self.getUnderlyingAllocatorPtr(); - const result = try underlying.vtable.alloc(underlying.ptr, n, ptr_align, len_align, 
ret_addr); + const result = try underlying.rawAlloc(n, ptr_align, len_align, ret_addr); assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align)); if (len_align == 0) { assert(result.len == n); @@ -95,7 +95,7 @@ pub fn ValidationAllocator(comptime T: type) type { assert(new_len >= len_align); } const underlying = self.getUnderlyingAllocatorPtr(); - const result = try underlying.vtable.resize(underlying.ptr, buf, buf_align, new_len, len_align, ret_addr); + const result = try underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr); if (len_align == 0) { assert(result == new_len); } else { @@ -104,6 +104,19 @@ pub fn ValidationAllocator(comptime T: type) type { } return result; } + + pub fn free( + self: *Self, + buf: []u8, + buf_align: u29, + ret_addr: usize, + ) void { + _ = self; + _ = buf_align; + _ = ret_addr; + assert(buf.len > 0); + } + pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct { pub fn reset(self: *Self) void { self.underlying_allocator.reset(); @@ -139,6 +152,7 @@ const fail_allocator = Allocator{ const failAllocator_vtable = Allocator.VTable{ .alloc = failAllocatorAlloc, .resize = Allocator.NoResize(c_void).noResize, + .free = Allocator.NoOpFree(c_void).noOpFree, }; fn failAllocatorAlloc(_: *c_void, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 { diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index df3974f795ec..6edad7e05bd3 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -5,6 +5,7 @@ const assert = std.debug.assert; const math = std.math; const mem = std.mem; const Allocator = @This(); +const builtin = @import("builtin"); pub const Error = error{OutOfMemory}; @@ -28,9 +29,6 @@ pub const VTable = struct { /// length returned by `alloc` or `resize`. `buf_align` must equal the same value /// that was passed as the `ptr_align` parameter to the original `alloc` call. /// - /// Passing a `new_len` of 0 frees and invalidates the buffer such that it can no - /// longer be passed to `resize`. - /// /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`. /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be /// unmodified and error.OutOfMemory MUST be returned. @@ -40,36 +38,54 @@ pub const VTable = struct { /// provide a way to modify the alignment of a pointer. Rather it provides an API for /// accepting more bytes of memory from the allocator than requested. /// - /// `new_len` must be greater than or equal to `len_align` and must be aligned by `len_align`. + /// `new_len` must be greater than zero, greater than or equal to `len_align` and must be aligned by `len_align`. /// /// `ret_addr` is optionally provided as the first return address of the allocation call stack. /// If the value is `0` it means no return address has been provided. resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize, + + /// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`. + /// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call. + /// + /// `ret_addr` is optionally provided as the first return address of the allocation call stack. + /// If the value is `0` it means no return address has been provided. 
+ free: fn (ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void, }; pub fn init( pointer: anytype, comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8, comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize, + comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void, ) Allocator { const Ptr = @TypeOf(pointer); - assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer - assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer + const ptr_info = @typeInfo(Ptr); + + assert(ptr_info == .Pointer); // Must be a pointer + assert(ptr_info.Pointer.size == .One); // Must be a single-item pointer + + const alignment = ptr_info.Pointer.alignment; + + const gen = struct { fn alloc(ptr: *c_void, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { - const alignment = @typeInfo(Ptr).Pointer.alignment; const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); - return allocFn(self, len, ptr_align, len_align, ret_addr); + return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr }); } fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize { - const alignment = @typeInfo(Ptr).Pointer.alignment; + assert(new_len != 0); + const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); + return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr }); + } + fn free(ptr: *c_void, buf: []u8, buf_align: u29, ret_addr: usize) void { const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); - return resizeFn(self, buf, buf_align, new_len, len_align, ret_addr); + @call(.{ .modifier = .always_inline }, freeFn, .{ self, buf, buf_align, ret_addr }); } }; + const vtable = VTable{ .alloc = gen.alloc, .resize = gen.resize, + .free = gen.free, }; return .{ @@ -100,6 +116,56 @@ pub fn NoResize(comptime AllocatorType: type) type { }; } +/// Set freeFn to `NoOpFree(AllocatorType).noOpFree` if free is a no-op. +pub fn NoOpFree(comptime AllocatorType: type) type { + return struct { + pub fn noOpFree( + self: *AllocatorType, + buf: []u8, + buf_align: u29, + ret_addr: usize, + ) void { + _ = self; + _ = buf; + _ = buf_align; + _ = ret_addr; + } + }; +} + +/// Set freeFn to `PanicFree(AllocatorType).panicFree` if free is not a supported operation. 
+pub fn PanicFree(comptime AllocatorType: type) type { + return struct { + pub fn panicFree( + self: *AllocatorType, + buf: []u8, + buf_align: u29, + ret_addr: usize, + ) void { + _ = self; + _ = buf; + _ = buf_align; + _ = ret_addr; + @panic("free is not a supported operation for the allocator: " ++ @typeName(AllocatorType)); + } + }; +} + +/// This function is not intended to be called except from within the implementation of an Allocator +pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { + return self.vtable.alloc(self.ptr, len, ptr_align, len_align, ret_addr); +} + +/// This function is not intended to be called except from within the implementation of an Allocator +pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize { + return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr); +} + +/// This function is not intended to be called except from within the implementation of an Allocator +pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usize) void { + return self.vtable.free(self.ptr, buf, buf_align, ret_addr); +} + /// Realloc is used to modify the size or alignment of an existing allocation, /// as well as to provide the allocator with an opportunity to move an allocation /// to a better location. @@ -133,8 +199,7 @@ fn reallocBytes( /// Guaranteed to be >= 1. /// Guaranteed to be a power of 2. old_alignment: u29, - /// If `new_byte_count` is 0 then this is a free and it is guaranteed that - /// `old_mem.len != 0`. + /// `new_byte_count` must be greater than zero. new_byte_count: usize, /// Guaranteed to be >= 1. /// Guaranteed to be a power of 2. @@ -147,18 +212,20 @@ fn reallocBytes( return_address: usize, ) Error![]u8 { if (old_mem.len == 0) { - const new_mem = try self.vtable.alloc(self.ptr, new_byte_count, new_alignment, len_align, return_address); + const new_mem = try self.rawAlloc(new_byte_count, new_alignment, len_align, return_address); // TODO: https://github.com/ziglang/zig/issues/4298 @memset(new_mem.ptr, undefined, new_byte_count); return new_mem; } + assert(new_byte_count > 0); // `new_byte_count` must be greater than zero; this is a resize, not a free + if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) { if (new_byte_count <= old_mem.len) { const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address); return old_mem.ptr[0..shrunk_len]; } - if (self.vtable.resize(self.ptr, old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| { + if (self.rawResize(old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| { assert(resized_len >= new_byte_count); // TODO: https://github.com/ziglang/zig/issues/4298 @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count); @@ -184,11 +251,11 @@ fn moveBytes( ) Error![]u8 { assert(old_mem.len > 0); assert(new_len > 0); - const new_mem = try self.vtable.alloc(self.ptr, new_len, new_alignment, len_align, return_address); + const new_mem = try self.rawAlloc(new_len, new_alignment, len_align, return_address); @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len)); // TODO https://github.com/ziglang/zig/issues/4298 @memset(old_mem.ptr, undefined, old_mem.len); - _ = self.shrinkBytes(old_mem, old_align, 0, 0, return_address); + self.rawFree(old_mem, old_align, return_address); return new_mem; } @@ -207,7 +274,7 @@ pub fn 
destroy(self: Allocator, ptr: anytype) void { const T = info.child; if (@sizeOf(T) == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); - _ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress()); + self.rawFree(non_const_ptr[0..@sizeOf(T)], info.alignment, @returnAddress()); } /// Allocates an array of `n` items of type `T` and sets all the @@ -326,7 +393,7 @@ pub fn allocAdvancedWithRetAddr( .exact => 0, .at_least => size_of_T, }; - const byte_slice = try self.vtable.alloc(self.ptr, byte_count, a, len_align, return_address); + const byte_slice = try self.rawAlloc(byte_count, a, len_align, return_address); switch (exact) { .exact => assert(byte_slice.len == byte_count), .at_least => assert(byte_slice.len >= byte_count), @@ -351,7 +418,7 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old } const old_byte_slice = mem.sliceAsBytes(old_mem); const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; - const rc = try self.vtable.resize(self.ptr, old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()); + const rc = try self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()); assert(rc == new_byte_count); const new_byte_slice = old_byte_slice.ptr[0..new_byte_count]; return mem.bytesAsSlice(T, new_byte_slice); @@ -465,6 +532,11 @@ pub fn alignedShrinkWithRetAddr( if (new_n == old_mem.len) return old_mem; + if (new_n == 0) { + self.free(old_mem); + return @as([*]align(new_alignment) T, undefined)[0..0]; + } + assert(new_n < old_mem.len); assert(new_alignment <= Slice.alignment); @@ -489,7 +561,7 @@ pub fn free(self: Allocator, memory: anytype) void { const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr)); // TODO: https://github.com/ziglang/zig/issues/4298 @memset(non_const_ptr, undefined, bytes_len); - _ = self.shrinkBytes(non_const_ptr[0..bytes_len], Slice.alignment, 0, 0, @returnAddress()); + self.rawFree(non_const_ptr[0..bytes_len], Slice.alignment, @returnAddress()); } /// Copies `m` to newly allocated memory. Caller owns the memory. 
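Taken together, the Allocator.zig hunks above redefine the implementor-facing contract: an allocator now supplies three functions, and frees no longer travel through resize. A rough sketch of a wrapper written against this patch-09 interface follows (the CountingAllocator below is invented for illustration; only Allocator.init, rawAlloc, rawResize, and rawFree come from the patch itself):

    const std = @import("std");

    /// Illustrative wrapper: forwards every call to a parent allocator and
    /// tracks the number of live bytes, using the split alloc/resize/free interface.
    const CountingAllocator = struct {
        parent: std.mem.Allocator,
        live_bytes: usize = 0,

        pub fn allocator(self: *CountingAllocator) std.mem.Allocator {
            return std.mem.Allocator.init(self, alloc, resize, free);
        }

        fn alloc(self: *CountingAllocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
            const result = try self.parent.rawAlloc(len, ptr_align, len_align, ret_addr);
            self.live_bytes += result.len;
            return result;
        }

        fn resize(self: *CountingAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
            const resized_len = try self.parent.rawResize(buf, buf_align, new_len, len_align, ret_addr);
            self.live_bytes = self.live_bytes - buf.len + resized_len;
            return resized_len;
        }

        fn free(self: *CountingAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
            self.parent.rawFree(buf, buf_align, ret_addr);
            self.live_bytes -= buf.len;
        }
    };

Note that resize still returns Error!usize at this stage; the final patch below changes that return type to ?usize, which would turn the try in the wrapper's resize into an orelse return null.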
@@ -520,5 +592,5 @@ pub fn shrinkBytes( return_address: usize, ) usize { assert(new_len <= buf.len); - return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, return_address) catch unreachable; + return self.rawResize(buf, buf_align, new_len, len_align, return_address) catch unreachable; } diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index c26163a6f3d7..5e1084037e80 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -41,7 +41,7 @@ pub const FailingAllocator = struct { } pub fn allocator(self: *FailingAllocator) mem.Allocator { - return mem.Allocator.init(self, alloc, resize); + return mem.Allocator.init(self, alloc, resize, free); } fn alloc( @@ -54,7 +54,7 @@ pub const FailingAllocator = struct { if (self.index == self.fail_index) { return error.OutOfMemory; } - const result = try self.internal_allocator.vtable.alloc(self.internal_allocator.ptr, len, ptr_align, len_align, return_address); + const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address); self.allocated_bytes += result.len; self.allocations += 1; self.index += 1; @@ -69,18 +69,26 @@ pub const FailingAllocator = struct { len_align: u29, ra: usize, ) error{OutOfMemory}!usize { - const r = self.internal_allocator.vtable.resize(self.internal_allocator.ptr, old_mem, old_align, new_len, len_align, ra) catch |e| { + const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) catch |e| { std.debug.assert(new_len > old_mem.len); return e; }; - if (new_len == 0) { - self.deallocations += 1; - self.freed_bytes += old_mem.len; - } else if (r < old_mem.len) { + if (r < old_mem.len) { self.freed_bytes += old_mem.len - r; } else { self.allocated_bytes += r - old_mem.len; } return r; } + + fn free( + self: *FailingAllocator, + old_mem: []u8, + old_align: u29, + ra: usize, + ) void { + self.internal_allocator.rawFree(old_mem, old_align, ra); + self.deallocations += 1; + self.freed_bytes += old_mem.len; + } }; diff --git a/src/tracy.zig b/src/tracy.zig index 83e31e57640d..064374030fec 100644 --- a/src/tracy.zig +++ b/src/tracy.zig @@ -155,13 +155,10 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } } - if (resized_len != 0) { - // this was a shrink or a resize - if (name) |n| { - allocNamed(buf.ptr, resized_len, n); - } else { - alloc(buf.ptr, resized_len); - } + if (name) |n| { + allocNamed(buf.ptr, resized_len, n); + } else { + alloc(buf.ptr, resized_len); } return resized_len; @@ -172,6 +169,15 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { return err; } } + + fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void { + self.parent_allocator.rawFree(buf, buf_align, ret_addr); + if (name) |n| { + freeNamed(buf.ptr, n); + } else { + free(buf.ptr); + } + } }; } From 066eaa5e9cbfde172449f6d95bb884c7d86ac10c Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Sun, 7 Nov 2021 01:40:06 +0000 Subject: [PATCH 10/10] allocgate: change resize to return optional instead of error --- lib/std/build/OptionsStep.zig | 2 +- lib/std/heap.zig | 34 +++-- lib/std/heap/arena_allocator.zig | 19 ++- lib/std/heap/general_purpose_allocator.zig | 20 +-- lib/std/heap/log_to_writer_allocator.zig | 13 +- lib/std/heap/logging_allocator.zig | 18 +-- lib/std/mem.zig | 8 +- lib/std/mem/Allocator.zig | 148 ++++++--------------- lib/std/testing/failing_allocator.zig | 7 +- src/link/MachO.zig | 2 +- src/link/tapi.zig | 2 +- src/main.zig | 2 +- src/tracy.zig | 25 ++-- 
test/compare_output.zig | 6 +- tools/update-linux-headers.zig | 2 +- 15 files changed, 112 insertions(+), 196 deletions(-) diff --git a/lib/std/build/OptionsStep.zig b/lib/std/build/OptionsStep.zig index d106b051718a..eae5983845b3 100644 --- a/lib/std/build/OptionsStep.zig +++ b/lib/std/build/OptionsStep.zig @@ -350,5 +350,5 @@ test "OptionsStep" { \\ , options.contents.items); - _ = try std.zig.parse(&arena.allocator, try options.contents.toOwnedSliceSentinel(0)); + _ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0)); } diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 2d3a96676d45..4ea0ff718fa0 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -129,7 +129,7 @@ const CAllocator = struct { new_len: usize, len_align: u29, return_address: usize, - ) Allocator.Error!usize { + ) ?usize { _ = buf_align; _ = return_address; if (new_len <= buf.len) { @@ -141,7 +141,7 @@ const CAllocator = struct { return mem.alignAllocLen(full_len, new_len, len_align); } } - return error.OutOfMemory; + return null; } fn free( @@ -205,13 +205,13 @@ fn rawCResize( new_len: usize, len_align: u29, ret_addr: usize, -) Allocator.Error!usize { +) ?usize { _ = old_align; _ = ret_addr; if (new_len <= buf.len) { return mem.alignAllocLen(buf.len, new_len, len_align); } - return error.OutOfMemory; + return null; } fn rawCFree( @@ -361,7 +361,7 @@ const PageAllocator = struct { new_size: usize, len_align: u29, return_address: usize, - ) Allocator.Error!usize { + ) ?usize { _ = buf_align; _ = return_address; const new_size_aligned = mem.alignForward(new_size, mem.page_size); @@ -387,7 +387,7 @@ const PageAllocator = struct { if (new_size_aligned <= old_size_aligned) { return alignPageAllocLen(new_size_aligned, new_size, len_align); } - return error.OutOfMemory; + return null; } const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); @@ -403,7 +403,7 @@ const PageAllocator = struct { // TODO: call mremap // TODO: if the next_mmap_addr_hint is within the remapped range, update it - return error.OutOfMemory; + return null; } fn free(_: *c_void, buf_unaligned: []u8, buf_align: u29, return_address: usize) void { @@ -579,11 +579,11 @@ const WasmPageAllocator = struct { new_len: usize, len_align: u29, return_address: usize, - ) error{OutOfMemory}!usize { + ) ?usize { _ = buf_align; _ = return_address; const aligned_len = mem.alignForward(buf.len, mem.page_size); - if (new_len > aligned_len) return error.OutOfMemory; + if (new_len > aligned_len) return null; const current_n = nPages(aligned_len); const new_n = nPages(new_len); if (new_n != current_n) { @@ -674,7 +674,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { new_size: usize, len_align: u29, return_address: usize, - ) error{OutOfMemory}!usize { + ) ?usize { _ = buf_align; _ = return_address; @@ -686,7 +686,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { os.windows.HEAP_REALLOC_IN_PLACE_ONLY, @intToPtr(*c_void, root_addr), amt, - ) orelse return error.OutOfMemory; + ) orelse return null; assert(new_ptr == @intToPtr(*c_void, root_addr)); const return_len = init: { if (len_align == 0) break :init new_size; @@ -788,14 +788,13 @@ pub const FixedBufferAllocator = struct { new_size: usize, len_align: u29, return_address: usize, - ) Allocator.Error!usize { + ) ?usize { _ = buf_align; _ = return_address; assert(self.ownsSlice(buf)); // sanity check if (!self.isLastAllocation(buf)) { - if (new_size > buf.len) - return error.OutOfMemory; + if (new_size > buf.len) return null; return 
mem.alignAllocLen(buf.len, new_size, len_align); } @@ -806,9 +805,8 @@ pub const FixedBufferAllocator = struct { } const add = new_size - buf.len; - if (add + self.end_index > self.buffer.len) { - return error.OutOfMemory; - } + if (add + self.end_index > self.buffer.len) return null; + self.end_index += add; return new_size; } @@ -891,7 +889,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type { new_len: usize, len_align: u29, return_address: usize, - ) error{OutOfMemory}!usize { + ) ?usize { if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) { return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address); } else { diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c5a8d6bc7ea5..4bc5d58c1a96 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -78,26 +78,23 @@ pub const ArenaAllocator = struct { const bigger_buf_size = @sizeOf(BufNode) + new_end_index; // Try to grow the buffer in-place - cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) catch |err| switch (err) { - error.OutOfMemory => { - // Allocate a new node if that's not possible - cur_node = try self.createNode(cur_buf.len, n + ptr_align); - continue; - }, + cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) orelse { + // Allocate a new node if that's not possible + cur_node = try self.createNode(cur_buf.len, n + ptr_align); + continue; }; } } - fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize { + fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize { _ = buf_align; _ = len_align; _ = ret_addr; - const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory; + const cur_node = self.state.buffer_list.first orelse return null; const cur_buf = cur_node.data[@sizeOf(BufNode)..]; if (@ptrToInt(cur_buf.ptr) + self.state.end_index != @ptrToInt(buf.ptr) + buf.len) { - if (new_len > buf.len) - return error.OutOfMemory; + if (new_len > buf.len) return null; return new_len; } @@ -108,7 +105,7 @@ pub const ArenaAllocator = struct { self.state.end_index += new_len - buf.len; return new_len; } else { - return error.OutOfMemory; + return null; } } diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index 5687b1efb274..8160bc2a668e 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -517,7 +517,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { new_size: usize, len_align: u29, ret_addr: usize, - ) Error!usize { + ) ?usize { const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse { if (config.safety) { @panic("Invalid free"); @@ -557,7 +557,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (config.enable_memory_limit) { const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size; if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) { - return error.OutOfMemory; + return null; } self.total_requested_bytes = new_req_bytes; } @@ -565,7 +565,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { self.total_requested_bytes = prev_req_bytes; }; - const result_len = try self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr); + const result_len = 
self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr) orelse return null; if (config.enable_memory_limit) { entry.value_ptr.requested_size = new_size; @@ -650,7 +650,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { new_size: usize, len_align: u29, ret_addr: usize, - ) Error!usize { + ) ?usize { self.mutex.lock(); defer self.mutex.unlock(); @@ -705,7 +705,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { if (config.enable_memory_limit) { const new_req_bytes = prev_req_bytes + new_size - old_mem.len; if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) { - return error.OutOfMemory; + return null; } self.total_requested_bytes = new_req_bytes; } @@ -726,7 +726,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } return new_size; } - return error.OutOfMemory; + return null; } fn free( @@ -735,8 +735,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { old_align: u29, ret_addr: usize, ) void { - const held = self.mutex.acquire(); - defer held.release(); + self.mutex.lock(); + defer self.mutex.unlock(); assert(old_mem.len != 0); @@ -850,7 +850,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { return true; } - fn alloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { + fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 { self.mutex.lock(); defer self.mutex.unlock(); @@ -1065,7 +1065,7 @@ test "shrink large object to large object" { slice[0] = 0x12; slice[60] = 0x34; - slice = try allocator.resize(slice, page_size * 2 + 1); + slice = allocator.resize(slice, page_size * 2 + 1) orelse return; try std.testing.expect(slice[0] == 0x12); try std.testing.expect(slice[60] == 0x34); diff --git a/lib/std/heap/log_to_writer_allocator.zig b/lib/std/heap/log_to_writer_allocator.zig index fa8c19e0a068..c63c1a826f6e 100644 --- a/lib/std/heap/log_to_writer_allocator.zig +++ b/lib/std/heap/log_to_writer_allocator.zig @@ -45,22 +45,23 @@ pub fn LogToWriterAllocator(comptime Writer: type) type { new_len: usize, len_align: u29, ra: usize, - ) error{OutOfMemory}!usize { + ) ?usize { if (new_len <= buf.len) { self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {}; } else { self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {}; } + if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| { if (new_len > buf.len) { self.writer.print(" success!\n", .{}) catch {}; } return resized_len; - } else |e| { - std.debug.assert(new_len > buf.len); - self.writer.print(" failure!\n", .{}) catch {}; - return e; } + + std.debug.assert(new_len > buf.len); + self.writer.print(" failure!\n", .{}) catch {}; + return null; } fn free( @@ -95,7 +96,7 @@ test "LogToWriterAllocator" { var a = try allocator.alloc(u8, 10); a = allocator.shrink(a, 5); try std.testing.expect(a.len == 5); - try std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20)); + try std.testing.expect(allocator.resize(a, 20) == null); allocator.free(a); try std.testing.expectEqualSlices(u8, diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig index b631cd0b1b7e..0bd0755cfcf6 100644 --- a/lib/std/heap/logging_allocator.zig +++ b/lib/std/heap/logging_allocator.zig @@ -77,7 +77,7 @@ pub fn ScopedLoggingAllocator( new_len: usize, len_align: u29, ra: usize, - ) error{OutOfMemory}!usize { + ) ?usize { if (self.parent_allocator.rawResize(buf, 
buf_align, new_len, len_align, ra)) |resized_len| { if (new_len <= buf.len) { logHelper( @@ -94,15 +94,15 @@ pub fn ScopedLoggingAllocator( } return resized_len; - } else |err| { - std.debug.assert(new_len > buf.len); - logHelper( - failure_log_level, - "expand - failure: {s} - {} to {}, len_align: {}, buf_align: {}", - .{ @errorName(err), buf.len, new_len, len_align, buf_align }, - ); - return err; } + + std.debug.assert(new_len > buf.len); + logHelper( + failure_log_level, + "expand - failure - {} to {}, len_align: {}, buf_align: {}", + .{ buf.len, new_len, len_align, buf_align }, + ); + return null; } fn free( diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 9c7ce3867b31..c310835b61fa 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -88,14 +88,14 @@ pub fn ValidationAllocator(comptime T: type) type { new_len: usize, len_align: u29, ret_addr: usize, - ) Allocator.Error!usize { + ) ?usize { assert(buf.len > 0); if (len_align != 0) { assert(mem.isAlignedAnyAlign(new_len, len_align)); assert(new_len >= len_align); } const underlying = self.getUnderlyingAllocatorPtr(); - const result = try underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr); + const result = underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr) orelse return null; if (len_align == 0) { assert(result == new_len); } else { @@ -188,7 +188,7 @@ test "Allocator.resize" { defer testing.allocator.free(values); for (values) |*v, i| v.* = @intCast(T, i); - values = try testing.allocator.resize(values, values.len + 10); + values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory; try testing.expect(values.len == 110); } @@ -203,7 +203,7 @@ test "Allocator.resize" { defer testing.allocator.free(values); for (values) |*v, i| v.* = @intToFloat(T, i); - values = try testing.allocator.resize(values, values.len + 10); + values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory; try testing.expect(values.len == 110); } } diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 6edad7e05bd3..29fbf7c2c184 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -29,9 +29,9 @@ pub const VTable = struct { /// length returned by `alloc` or `resize`. `buf_align` must equal the same value /// that was passed as the `ptr_align` parameter to the original `alloc` call. /// - /// error.OutOfMemory can only be returned if `new_len` is greater than `buf.len`. + /// `null` can only be returned if `new_len` is greater than `buf.len`. /// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be - /// unmodified and error.OutOfMemory MUST be returned. + /// unmodified and `null` MUST be returned. /// /// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes, /// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not* @@ -42,7 +42,7 @@ pub const VTable = struct { /// /// `ret_addr` is optionally provided as the first return address of the allocation call stack. /// If the value is `0` it means no return address has been provided. - resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize, + resize: fn (ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize, /// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`. 
/// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call. @@ -55,7 +55,7 @@ pub const VTable = struct { pub fn init( pointer: anytype, comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8, - comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize, + comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize, comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void, ) Allocator { const Ptr = @TypeOf(pointer); @@ -71,7 +71,7 @@ pub fn init( const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr }); } - fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize { + fn resize(ptr: *c_void, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize { assert(new_len != 0); const self = @ptrCast(Ptr, @alignCast(alignment, ptr)); return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr }); @@ -104,14 +104,12 @@ pub fn NoResize(comptime AllocatorType: type) type { new_len: usize, len_align: u29, ret_addr: usize, - ) Error!usize { + ) ?usize { _ = self; _ = buf_align; _ = len_align; _ = ret_addr; - if (new_len > buf.len) - return error.OutOfMemory; - return new_len; + return if (new_len > buf.len) null else new_len; } }; } @@ -157,7 +155,7 @@ pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u } /// This function is not intended to be called except from within the implementation of an Allocator -pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Error!usize { +pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize { return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr); } @@ -166,99 +164,6 @@ pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usiz return self.vtable.free(self.ptr, buf, buf_align, ret_addr); } -/// Realloc is used to modify the size or alignment of an existing allocation, -/// as well as to provide the allocator with an opportunity to move an allocation -/// to a better location. -/// When the size/alignment is greater than the previous allocation, this function -/// returns `error.OutOfMemory` when the requested new allocation could not be granted. -/// When the size/alignment is less than or equal to the previous allocation, -/// this function returns `error.OutOfMemory` when the allocator decides the client -/// would be better off keeping the extra alignment/size. Clients will call -/// `vtable.resize` when they require the allocator to track a new alignment/size, -/// and so this function should only return success when the allocator considers -/// the reallocation desirable from the allocator's perspective. -/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle -/// reallocation failure, even when `new_n` <= `old_mem.len`. 
A `FixedBufferAllocator` -/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment -/// is less than or equal to the old allocation, because it cannot reclaim the memory, -/// and thus the `std.ArrayList` would be better off retaining its capacity. -/// When `reallocFn` returns, -/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same -/// as `old_mem` was when `reallocFn` is called. The bytes of -/// `return_value[old_mem.len..]` have undefined values. -/// The returned slice must have its pointer aligned at least to `new_alignment` bytes. -fn reallocBytes( - self: Allocator, - /// Guaranteed to be the same as what was returned from most recent call to - /// `vtable.alloc` or `vtable.resize`. - /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count` - /// is guaranteed to be >= 1. - old_mem: []u8, - /// If `old_mem.len == 0` then this is `undefined`, otherwise: - /// Guaranteed to be the same as what was passed to `allocFn`. - /// Guaranteed to be >= 1. - /// Guaranteed to be a power of 2. - old_alignment: u29, - /// `new_byte_count` must be greater than zero - new_byte_count: usize, - /// Guaranteed to be >= 1. - /// Guaranteed to be a power of 2. - /// Returned slice's pointer must have this alignment. - new_alignment: u29, - /// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly - /// non-zero means the length of the returned slice must be aligned by `len_align` - /// `new_len` must be aligned by `len_align` - len_align: u29, - return_address: usize, -) Error![]u8 { - if (old_mem.len == 0) { - const new_mem = try self.rawAlloc(new_byte_count, new_alignment, len_align, return_address); - // TODO: https://github.com/ziglang/zig/issues/4298 - @memset(new_mem.ptr, undefined, new_byte_count); - return new_mem; - } - - assert(new_byte_count > 0); // `new_byte_count` must greater than zero, this is a resize not a free - - if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) { - if (new_byte_count <= old_mem.len) { - const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address); - return old_mem.ptr[0..shrunk_len]; - } - if (self.rawResize(old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| { - assert(resized_len >= new_byte_count); - // TODO: https://github.com/ziglang/zig/issues/4298 - @memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count); - return old_mem.ptr[0..resized_len]; - } else |_| {} - } - if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) { - return error.OutOfMemory; - } - return self.moveBytes(old_mem, old_alignment, new_byte_count, new_alignment, len_align, return_address); -} - -/// Move the given memory to a new location in the given allocator to accomodate a new -/// size and alignment. -fn moveBytes( - self: Allocator, - old_mem: []u8, - old_align: u29, - new_len: usize, - new_alignment: u29, - len_align: u29, - return_address: usize, -) Error![]u8 { - assert(old_mem.len > 0); - assert(new_len > 0); - const new_mem = try self.rawAlloc(new_len, new_alignment, len_align, return_address); - @memcpy(new_mem.ptr, old_mem.ptr, math.min(new_len, old_mem.len)); - // TODO https://github.com/ziglang/zig/issues/4298 - @memset(old_mem.ptr, undefined, old_mem.len); - self.rawFree(old_mem, old_align, return_address); - return new_mem; -} - /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. 
pub fn create(self: Allocator, comptime T: type) Error!*T { @@ -409,7 +314,7 @@ pub fn allocAdvancedWithRetAddr( } /// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer. -pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old_mem) { +pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) ?@TypeOf(old_mem) { const Slice = @typeInfo(@TypeOf(old_mem)).Pointer; const T = Slice.child; if (new_n == 0) { @@ -417,8 +322,8 @@ pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) Error!@TypeOf(old return &[0]T{}; } const old_byte_slice = mem.sliceAsBytes(old_mem); - const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; - const rc = try self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()); + const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return null; + const rc = self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()) orelse return null; assert(rc == new_byte_count); const new_byte_slice = old_byte_slice.ptr[0..new_byte_count]; return mem.bytesAsSlice(T, new_byte_slice); @@ -488,8 +393,31 @@ pub fn reallocAdvancedWithRetAddr( .exact => 0, .at_least => @sizeOf(T), }; - const new_byte_slice = try self.reallocBytes(old_byte_slice, Slice.alignment, byte_count, new_alignment, len_align, return_address); - return mem.bytesAsSlice(T, @alignCast(new_alignment, new_byte_slice)); + + if (mem.isAligned(@ptrToInt(old_byte_slice.ptr), new_alignment)) { + if (byte_count <= old_byte_slice.len) { + const shrunk_len = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, len_align, return_address); + return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..shrunk_len])); + } + + if (self.rawResize(old_byte_slice, Slice.alignment, byte_count, len_align, return_address)) |resized_len| { + // TODO: https://github.com/ziglang/zig/issues/4298 + @memset(old_byte_slice.ptr + byte_count, undefined, resized_len - byte_count); + return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..resized_len])); + } + } + + if (byte_count <= old_byte_slice.len and new_alignment <= Slice.alignment) { + return error.OutOfMemory; + } + + const new_mem = try self.rawAlloc(byte_count, new_alignment, len_align, return_address); + @memcpy(new_mem.ptr, old_byte_slice.ptr, math.min(byte_count, old_byte_slice.len)); + // TODO https://github.com/ziglang/zig/issues/4298 + @memset(old_byte_slice.ptr, undefined, old_byte_slice.len); + self.rawFree(old_byte_slice, Slice.alignment, return_address); + + return mem.bytesAsSlice(T, @alignCast(new_alignment, new_mem)); } /// Prefer calling realloc to shrink if you can tolerate failure, such as @@ -580,7 +508,7 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T { } /// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning -/// error.OutOfMemory should be impossible. +/// that a `null` return value should be impossible. /// This function allows a runtime `buf_align` value. Callers should generally prefer /// to call `shrink` directly. 
pub fn shrinkBytes( @@ -592,5 +520,5 @@ pub fn shrinkBytes( return_address: usize, ) usize { assert(new_len <= buf.len); - return self.rawResize(buf, buf_align, new_len, len_align, return_address) catch unreachable; + return self.rawResize(buf, buf_align, new_len, len_align, return_address) orelse unreachable; } diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 5e1084037e80..677ca6f51b7b 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -68,11 +68,8 @@ pub const FailingAllocator = struct { new_len: usize, len_align: u29, ra: usize, - ) error{OutOfMemory}!usize { - const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) catch |e| { - std.debug.assert(new_len > old_mem.len); - return e; - }; + ) ?usize { + const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) orelse return null; if (r < old_mem.len) { self.freed_bytes += old_mem.len - r; } else { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index db2b8ffc4291..fc592ab5e827 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1288,7 +1288,7 @@ fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs: any // TODO this should not be performed if the user specifies `-flat_namespace` flag. // See ld64 manpages. var arena_alloc = std.heap.ArenaAllocator.init(self.base.allocator); - const arena = &arena_alloc.allocator; + const arena = arena_alloc.allocator(); defer arena_alloc.deinit(); while (dependent_libs.readItem()) |*id| { diff --git a/src/link/tapi.zig b/src/link/tapi.zig index 7a55a5104da5..e31ca92ed9b6 100644 --- a/src/link/tapi.zig +++ b/src/link/tapi.zig @@ -138,7 +138,7 @@ pub const LibStub = struct { err: { log.debug("trying to parse as []TbdV3", .{}); const inner = lib_stub.yaml.parse([]TbdV3) catch break :err; - var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len); + var out = try lib_stub.yaml.arena.allocator().alloc(Tbd, inner.len); for (inner) |doc, i| { out[i] = .{ .v3 = doc }; } diff --git a/src/main.zig b/src/main.zig index 981a76a36411..ad868582409e 100644 --- a/src/main.zig +++ b/src/main.zig @@ -159,7 +159,7 @@ pub fn main() anyerror!void { if (tracy.enable_allocation) { var gpa_tracy = tracy.tracyAllocator(gpa); - return mainArgs(&gpa_tracy.allocator, arena, args); + return mainArgs(gpa_tracy.allocator(), arena, args); } return mainArgs(gpa, arena, args); diff --git a/src/tracy.zig b/src/tracy.zig index 064374030fec..9a5bcc749b9f 100644 --- a/src/tracy.zig +++ b/src/tracy.zig @@ -113,20 +113,16 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { const Self = @This(); - pub fn allocator(self: *Self) std.mem.Allocator { - return std.mem.Allocator.init(self, allocFn, resizeFn); - } - - pub fn init(allocator: std.mem.Allocator) Self { + pub fn init(parent_allocator: std.mem.Allocator) Self { return .{ - .parent_allocator = allocator, - .allocator = .{ - .allocFn = allocFn, - .resizeFn = resizeFn, - }, + .parent_allocator = parent_allocator, }; } + pub fn allocator(self: *Self) std.mem.Allocator { + return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn); + } + fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 { const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ret_addr); if (result) |data| { @@ -162,12 +158,11 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type { } return resized_len; - } 
else |err| { - // this is not really an error condition, during normal operation the compiler hits this case thousands of times - // due to this emitting messages for it is both slow and causes clutter - // messageColor("allocation resize failed", 0xFF0000); - return err; } + + // during normal operation the compiler hits this case thousands of times; emitting + // messages for it would be slow and would clutter the output + return null; } fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void { diff --git a/test/compare_output.zig b/test/compare_output.zig index 8a0bfc1ac76d..c8b157c3355c 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -496,7 +496,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ var a = try allocator.alloc(u8, 10); \\ a = allocator.shrink(a, 5); \\ try std.testing.expect(a.len == 5); - \\ try std.testing.expectError(error.OutOfMemory, allocator.resize(a, 20)); + \\ try std.testing.expect(allocator.resize(a, 20) == null); \\ allocator.free(a); \\} \\ @@ -514,8 +514,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { , \\debug: alloc - success - len: 10, ptr_align: 1, len_align: 0 \\debug: shrink - success - 10 to 5, len_align: 0, buf_align: 1 - \\error: expand - failure: OutOfMemory - 5 to 20, len_align: 0, buf_align: 1 - \\debug: free - success - len: 5 + \\error: expand - failure - 5 to 20, len_align: 0, buf_align: 1 + \\debug: free - len: 5 \\ ); } diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig index e7693687c4e6..0a3fb85f71cb 100644 --- a/tools/update-linux-headers.zig +++ b/tools/update-linux-headers.zig @@ -131,7 +131,7 @@ const PathTable = std.StringHashMap(*TargetToHash); pub fn main() !void { var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator); - const arena = &arena_state.allocator; + const arena = arena_state.allocator(); const args = try std.process.argsAlloc(arena); var search_paths = std.ArrayList([]const u8).init(arena); var opt_out_dir: ?[]const u8 = null;
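
Note on the pattern these hunks establish: every `resize` implementation now returns `?usize` instead of `Allocator.Error!usize`, with `null` signalling a failed resize, and wrapper allocators expose their interface through an `allocator()` method rather than a `&self.allocator` field pointer. The sketch below shows a custom wrapper written against that interface; it is illustrative only, `CountingAllocator` and its `resize_failures` counter are hypothetical names, while the `Allocator.init`, `rawAlloc`, `rawResize`, and `rawFree` signatures follow those introduced in lib/std/mem/Allocator.zig above.

    const std = @import("std");

    /// Hypothetical wrapper that forwards to a parent allocator and counts
    /// failed resizes, illustrating the post-allocgate vtable shape.
    pub const CountingAllocator = struct {
        parent_allocator: std.mem.Allocator,
        resize_failures: usize = 0,

        const Self = @This();

        pub fn init(parent_allocator: std.mem.Allocator) Self {
            return .{ .parent_allocator = parent_allocator };
        }

        pub fn allocator(self: *Self) std.mem.Allocator {
            return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn);
        }

        fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
            return self.parent_allocator.rawAlloc(len, ptr_align, len_align, ret_addr);
        }

        fn resizeFn(self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
            // Failure is now `null` rather than `error.OutOfMemory`.
            return self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ret_addr) orelse {
                self.resize_failures += 1;
                return null;
            };
        }

        fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
            self.parent_allocator.rawFree(buf, buf_align, ret_addr);
        }
    };

Call sites change accordingly: `const bigger = allocator.resize(old, n) orelse return error.OutOfMemory;` replaces `const bigger = try allocator.resize(old, n);`, as the updated tests above show.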