diff --git a/src/runtime/Activation.zig b/src/runtime/Activation.zig
index 6986677..c7c11bd 100644
--- a/src/runtime/Activation.zig
+++ b/src/runtime/Activation.zig
@@ -9,6 +9,7 @@ const Allocator = std.mem.Allocator;
 const Actor = @import("./Actor.zig");
 const value = @import("./value.zig");
 const Value = value.Value;
+const Object = @import("object.zig").Object;
 const bytecode = @import("./bytecode.zig");
 const SourceRange = @import("./SourceRange.zig");
 const IntegerValue = value.IntegerValue;
@@ -107,6 +108,38 @@ pub fn restart(self: *Self) void {
     self.pc = 0;
 }
 
+/// Try to see if the current slot for the inline cache is filled and matches
+/// the receiver's map; if so, return the matching method object. Invalidate the
+/// inline cache entry and return null otherwise.
+pub fn getOrInvalidateMethodFromInlineCacheForReceiver(
+    self: *Self,
+    vm: *VirtualMachine,
+    receiver: Object.Ptr,
+) ?MethodObject.Ptr {
+    const activation_object = self.activation_object.get();
+    // NOTE: For the time being, we are wasting memory by allocating an
+    //       equally-sized inline cache for the bytecode block of the
+    //       activation; in the future we will map each send instruction to
+    //       an offset within the cache and shrink it drastically.
+    return activation_object.getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(vm, self.pc, receiver);
+}
+
+/// Write the given receiver-method pair into the appropriate offset of the
+/// activation object's inline cache (stored in the map).
+pub fn writeIntoInlineCache(
+    self: *Self,
+    vm: *VirtualMachine,
+    receiver: Object.Ptr,
+    method: MethodObject.Ptr,
+) void {
+    const activation_object = self.activation_object.get();
+    // NOTE: For the time being, we are wasting memory by allocating an
+    //       equally-sized inline cache for the bytecode block of the
+    //       activation; in the future we will map each send instruction to
+    //       an offset within the cache and shrink it drastically.
+    activation_object.writeIntoInlineCacheAtOffset(vm, self.pc, receiver, method);
+}
+
 pub fn format(
     activation: Self,
     comptime fmt: []const u8,
@@ -227,13 +260,14 @@
     ) !void {
         var source_range = SourceRange.initNoRef(current_executable, .{ .start = 0, .end = 1 });
 
+        const entrypoint_block = new_executable.value.getEntrypointBlock();
         var token = try vm.heap.getAllocation(
-            MethodObject.requiredSizeForCreatingTopLevelContext() +
+            MethodObject.requiredSizeForCreatingTopLevelContext(entrypoint_block) +
                 ActivationObject.requiredSizeForAllocation(0, 0),
         );
         defer token.deinit();
 
-        const toplevel_context_method = try MethodObject.createTopLevelContextForExecutable(vm, &token, new_executable, new_executable.value.getEntrypointBlock());
+        const toplevel_context_method = try MethodObject.createTopLevelContextForExecutable(vm, &token, new_executable, entrypoint_block);
         const activation_slot = try self.getNewActivationSlot(vm.allocator);
         toplevel_context_method.activateMethod(vm, &token, vm.current_actor.id, vm.lobby(), &.{}, target_location, source_range, activation_slot);
     }
diff --git a/src/runtime/Heap.zig b/src/runtime/Heap.zig
index b1e0a98..a1127a5 100644
--- a/src/runtime/Heap.zig
+++ b/src/runtime/Heap.zig
@@ -403,7 +403,7 @@ const Space = struct {
     /// order to evacuate all the objects to a higher generation.
     tenure_target: ?*Space = null,
     /// The name of this space.
-    name: [*:0]const u8,
+    name: []const u8,
 
     /// A link node for a newer generation space to scan in order to update
     /// references from the newer space to the older one.
@@ -412,7 +412,7 @@ const Space = struct {
         previous: ?*const NewerGenerationLink,
     };
 
-    pub fn lazyInit(heap: *Self, comptime name: [*:0]const u8, size: usize) Space {
+    pub fn lazyInit(heap: *Self, comptime name: []const u8, size: usize) Space {
        return Space{
            .heap = heap,
            .name = name,
@@ -422,7 +422,7 @@ const Space = struct {
        };
     }
 
-    pub fn init(heap: *Self, allocator: Allocator, comptime name: [*:0]const u8, size: usize) !Space {
+    pub fn init(heap: *Self, allocator: Allocator, comptime name: []const u8, size: usize) !Space {
        var self = lazyInit(heap, name, size);
        try self.allocateMemory(allocator);
        return self;
@@ -1038,3 +1038,73 @@ test "link an object to another and perform scavenge" {
     var referenced_object_value = new_referenced_object.getMap().getSlotByName("actual").?.value;
     try std.testing.expectEqual(@as(u64, 0xDEADBEEF), referenced_object_value.asUnsignedInteger());
 }
+
+fn HeapAddress(comptime T: type) type {
+    return struct {
+        heap: *const Self,
+        address: T,
+
+        fn spaceNameIfAddressWithin(self: @This(), space: *const Space) ?[]const u8 {
+            const memory_start = @ptrToInt(space.memory.ptr);
+            const memory_end = @ptrToInt(space.memory.ptr + space.memory.len);
+            const address = @ptrToInt(self.address);
+
+            if (address >= memory_start and address < memory_end)
+                return @as([]const u8, space.name);
+            return null;
+        }
+
+        fn spaceName(self: @This()) []const u8 {
+            if (self.spaceNameIfAddressWithin(&self.heap.eden)) |name|
+                return name;
+            if (self.spaceNameIfAddressWithin(&self.heap.from_space)) |name|
+                return name;
+            if (self.spaceNameIfAddressWithin(&self.heap.to_space)) |name|
+                return name;
+            if (self.spaceNameIfAddressWithin(&self.heap.old_space)) |name|
+                return name;
+            @panic("!!! This address isn't within the heap!");
+        }
+
+        fn spaceOffsetIfAddressWithin(self: @This(), space: *const Space) ?usize {
+            const memory_start = @ptrToInt(space.memory.ptr);
+            const memory_end = @ptrToInt(space.memory.ptr + space.memory.len);
+            const address = @ptrToInt(self.address);
+
+            if (address >= memory_start and address < memory_end)
+                return address - memory_start;
+            return null;
+        }
+
+        fn spaceOffset(self: @This()) usize {
+            if (self.spaceOffsetIfAddressWithin(&self.heap.eden)) |offset|
+                return offset;
+            if (self.spaceOffsetIfAddressWithin(&self.heap.from_space)) |offset|
+                return offset;
+            if (self.spaceOffsetIfAddressWithin(&self.heap.to_space)) |offset|
+                return offset;
+            if (self.spaceOffsetIfAddressWithin(&self.heap.old_space)) |offset|
+                return offset;
+            @panic("!!! This address isn't within the heap!");
+        }
+
+        pub fn format(
+            self: @This(),
+            comptime fmt: []const u8,
+            options: std.fmt.FormatOptions,
+            writer: anytype,
+        ) !void {
+            _ = fmt;
+
+            try writer.writeByte('[');
+            try writer.writeAll(self.spaceName());
+            try writer.writeByte('+');
+            try std.fmt.formatInt(self.spaceOffset(), 10, .lower, options, writer);
+            try writer.writeByte(']');
+        }
+    };
+}
+
+pub fn asHeapAddress(self: *const Self, value: anytype) HeapAddress(@TypeOf(value)) {
+    return .{ .heap = self, .address = value };
+}
diff --git a/src/runtime/interpreter.zig b/src/runtime/interpreter.zig
index 07ece61..8e3d8e1 100644
--- a/src/runtime/interpreter.zig
+++ b/src/runtime/interpreter.zig
@@ -422,6 +422,56 @@ fn performPrimitiveSend(
     );
 }
 
+/// Try to see if the current slot for the inline cache is filled and matches
+/// the receiver's map; if so, return the matching method object. Invalidate the
+/// inline cache entry and return null otherwise.
+fn getOrInvalidateMethodFromInlineCacheForReceiver(
+    vm: *VirtualMachine,
+    actor: *Actor,
+    receiver: Value,
+) ?MethodObject.Ptr {
+    if (!receiver.isObjectReference()) {
+        // std.debug.print("MISS because not object ref\n", .{});
+        return null;
+    }
+    var real_receiver = receiver;
+    if (receiver.asObject().asType(.Activation)) |activation| {
+        real_receiver = activation.findActivationReceiver();
+    }
+    if (!real_receiver.isObjectReference()) {
+        // std.debug.print("MISS because not object ref\n", .{});
+        return null;
+    }
+
+    const current_activation = actor.activation_stack.getCurrent();
+    return current_activation.getOrInvalidateMethodFromInlineCacheForReceiver(vm, real_receiver.asObject());
+}
+
+/// If the receiver is an object, write the receiver-method pair into the
+/// current activation's inline cache.
+fn writeIntoInlineCache(
+    vm: *VirtualMachine,
+    actor: *Actor,
+    receiver: Value,
+    method: MethodObject.Ptr,
+) void {
+    if (!receiver.isObjectReference()) {
+        // std.debug.print("NOWR because not object ref\n", .{});
+        return;
+    }
+    var real_receiver = receiver;
+    if (receiver.asObject().asType(.Activation)) |activation| {
+        real_receiver = activation.findActivationReceiver();
+    }
+    if (!real_receiver.isObjectReference()) {
+        // std.debug.print("NOWR because not object ref\n", .{});
+        return;
+    }
+
+    const current_activation = actor.activation_stack.getCurrent();
+    current_activation.writeIntoInlineCache(vm, real_receiver.asObject(), method);
+}
+
 /// Sends a message to the given receiver, returning the result as a normal
 /// completion if it can be immediately resolved; if the message send must
 /// create a new activation, pushes the activation onto the stack and returns
@@ -470,10 +520,28 @@ pub fn sendMessage(
 
     actor.ensureCanRead(receiver, source_range);
 
+    if (getOrInvalidateMethodFromInlineCacheForReceiver(vm, actor, receiver)) |method| {
+        const argument_count = method.getArgumentSlotCount();
+        const argument_slice = actor.argument_stack.lastNItems(argument_count);
+
+        // Advance the instruction for the activation that will be returned to.
+        _ = actor.activation_stack.getCurrent().advanceInstruction();
+
+        try executeMethod(vm, actor, receiver, method, argument_slice, target_location, source_range);
+
+        actor.argument_stack.popNItems(argument_count);
+        // Bump the argument stack height of the (now current) activation since
+        // we've now popped this activation's items off it.
+        actor.activation_stack.getCurrent().stack_snapshot.bumpArgumentHeight(actor);
+        return null;
+    }
+
     return switch (receiver.lookup(vm, message_name)) {
         .Regular => |lookup_result| {
             if (lookup_result.isObjectReference()) {
                 if (lookup_result.asObject().asType(.Method)) |method| {
+                    writeIntoInlineCache(vm, actor, receiver, method);
+
                     const argument_count = method.getArgumentSlotCount();
                     const argument_slice = actor.argument_stack.lastNItems(argument_count);
 
@@ -734,15 +802,15 @@ fn createMethod(
         argument_slot_count += 1;
     }
 
+    const block = executable.value.getBlock(block_index);
     var token = try vm.heap.getAllocation(
-        MethodMap.requiredSizeForAllocation(total_slot_count) +
+        MethodMap.requiredSizeForAllocation(block, total_slot_count) +
             MethodObject.requiredSizeForAllocation(total_assignable_slot_count),
     );
     defer token.deinit();
 
-    const block = executable.value.getBlock(block_index);
     var method_map = try MethodMap.create(
-        vm.getMapMap(),
+        vm,
         &token,
         argument_slot_count,
         total_slot_count,
@@ -803,13 +871,13 @@ fn createBlock(
     std.debug.assert(nonlocal_return_target_activation.get(actor.activation_stack).?.nonlocal_return_target_activation == null);
 
     var token = try vm.heap.getAllocation(
-        BlockMap.requiredSizeForAllocation(total_slot_count) +
+        BlockMap.requiredSizeForAllocation(block, total_slot_count) +
            BlockObject.requiredSizeForAllocation(total_assignable_slot_count),
    );
    defer token.deinit();
 
    var block_map = try BlockMap.create(
-        vm.getMapMap(),
+        vm,
        &token,
        argument_slot_count,
        total_slot_count,
diff --git a/src/runtime/objects/activation.zig b/src/runtime/objects/activation.zig
index 270e544..4ba6316 100644
--- a/src/runtime/objects/activation.zig
+++ b/src/runtime/objects/activation.zig
@@ -9,6 +9,7 @@ const Map = map_import.Map;
 const Slot = @import("../slot.zig").Slot;
 const Heap = @import("../Heap.zig");
 const slots = @import("slots.zig");
+const Object = @import("../object.zig").Object;
 const MapType = map_import.MapType;
 const bytecode = @import("../bytecode.zig");
 const BlockMap = @import("block.zig").BlockMap;
@@ -16,6 +17,7 @@ const MethodMap = @import("method.zig").MethodMap;
 const map_import = @import("map.zig");
 const SlotsObject = slots.Slots;
 const GenericValue = value_import.Value;
+const MethodObject = @import("method.zig").Method;
 const value_import = @import("../value.zig");
 const object_lookup = @import("../object_lookup.zig");
 const stage2_compat = @import("../../utility/stage2_compat.zig");
@@ -128,11 +130,11 @@ pub const Activation = extern struct {
     // --- Slot counts ---
 
     pub fn getAssignableSlotCount(self: Activation.Ptr) u8 {
-        return self.dispatch("getAssignableSlotCount");
+        return self.dispatch("getAssignableSlotCount", .{});
     }
 
     pub fn getArgumentSlotCount(self: Activation.Ptr) u8 {
-        return self.dispatch("getArgumentSlotCount");
+        return self.dispatch("getArgumentSlotCount", .{});
     }
 
     // --- Map forwarding ---
@@ -151,10 +153,18 @@ pub const Activation = extern struct {
         };
     }
 
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(self: Activation.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr) ?MethodObject.Ptr {
+        return self.dispatch("getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver", .{ vm, offset, receiver });
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: Activation.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr, method: MethodObject.Ptr) void {
+        self.dispatch("writeIntoInlineCacheAtOffset", .{ vm, offset, receiver, method });
+    }
+
     // --- Slots and slot values ---
 
     pub fn getSlots(self: Activation.Ptr) Slot.Slice {
-        return self.dispatch("getSlots");
+        return self.dispatch("getSlots", .{});
     }
 
     /// Return a slice of `GenericValue`s for the assignable slots that are after the
@@ -265,10 +275,10 @@ pub const Activation = extern struct {
         return @typeInfo(@TypeOf(@field(MethodMap, fn_name))).Fn.return_type.?;
     }
 
-    fn dispatch(self: Activation.Ptr, comptime fn_name: []const u8) DispatchReturn(fn_name) {
+    fn dispatch(self: Activation.Ptr, comptime fn_name: []const u8, args: anytype) DispatchReturn(fn_name) {
         return switch (self.getActivationType()) {
-            .Method => @call(.auto, @field(MethodMap, fn_name), .{self.getMethodMap()}),
-            .Block => @call(.auto, @field(BlockMap, fn_name), .{self.getBlockMap()}),
+            .Method => @call(.auto, @field(MethodMap, fn_name), .{self.getMethodMap()} ++ args),
+            .Block => @call(.auto, @field(BlockMap, fn_name), .{self.getBlockMap()} ++ args),
         };
     }
 };
diff --git a/src/runtime/objects/array.zig b/src/runtime/objects/array.zig
index e8b8277..482b196 100644
--- a/src/runtime/objects/array.zig
+++ b/src/runtime/objects/array.zig
@@ -22,6 +22,7 @@ pub const Array = extern struct {
     object: Object align(@alignOf(u64)),
 
     pub const Ptr = stage2_compat.HeapPtr(Array, .Mutable);
+    pub const Type = .Array;
     pub const Value = value_import.ObjectValue(Array);
 
     /// Create a new array with the given values and filling extra items with
diff --git a/src/runtime/objects/block.zig b/src/runtime/objects/block.zig
index cc9c43e..c4e9a99 100644
--- a/src/runtime/objects/block.zig
+++ b/src/runtime/objects/block.zig
@@ -11,10 +11,12 @@ const Heap = @import("../Heap.zig");
 const debug = @import("../../debug.zig");
 const slots = @import("slots.zig");
 const Value = value_import.Value;
+const Object = @import("../object.zig").Object;
 const bytecode = @import("../bytecode.zig");
 const Activation = @import("../Activation.zig");
 const SlotsObject = slots.Slots;
 const SourceRange = @import("../SourceRange.zig");
+const MethodObject = @import("method.zig").Method;
 const value_import = @import("../value.zig");
 const ExecutableMap = @import("executable_map.zig").ExecutableMap;
 const stage2_compat = @import("../../utility/stage2_compat.zig");
@@ -182,7 +184,7 @@ pub const BlockMap = extern struct {
     /// Borrows a ref for `script` from the caller. Takes ownership of
     /// `statements`.
     pub fn create(
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
         token: *Heap.AllocationToken,
         argument_slot_count: u8,
         total_slot_count: u32,
@@ -191,11 +193,11 @@ pub const BlockMap = extern struct {
         block: *bytecode.Block,
         executable: bytecode.Executable.Ref,
     ) !BlockMap.Ptr {
-        const size = BlockMap.requiredSizeForAllocation(total_slot_count);
+        const size = BlockMap.requiredSizeForSelfAllocation(total_slot_count);
         var memory_area = token.allocate(.Object, size);
         var self = @ptrCast(BlockMap.Ptr, memory_area);
 
-        self.init(map_map, argument_slot_count, total_slot_count, parent_activation, nonlocal_return_target_activation, block, executable);
+        self.init(vm, token, argument_slot_count, total_slot_count, parent_activation, nonlocal_return_target_activation, block, executable);
         try token.heap.markAddressAsNeedingFinalization(memory_area);
 
         return self;
@@ -203,7 +205,8 @@ pub const BlockMap = extern struct {
 
     fn init(
         self: BlockMap.Ptr,
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
+        token: *Heap.AllocationToken,
         argument_slot_count: u8,
         total_slot_count: u32,
         parent_activation: Activation.ActivationRef,
@@ -211,7 +214,7 @@ pub const BlockMap = extern struct {
         block: *bytecode.Block,
         executable: bytecode.Executable.Ref,
     ) void {
-        self.base_map.init(.Block, map_map, argument_slot_count, total_slot_count, block, executable);
+        self.base_map.allocateAndInit(vm, token, .Block, argument_slot_count, total_slot_count, block, executable);
         self.parent_activation = parent_activation;
         self.nonlocal_return_target_activation = nonlocal_return_target_activation;
     }
@@ -231,7 +234,7 @@ pub const BlockMap = extern struct {
 
     pub fn clone(self: BlockMap.Ptr, vm: *VirtualMachine, token: *Heap.AllocationToken) !BlockMap.Ptr {
         const new_map = try create(
-            vm.getMapMap(),
+            vm,
             token,
             self.getArgumentSlotCount(),
             self.base_map.slots.information.slot_count,
@@ -248,14 +251,29 @@ pub const BlockMap = extern struct {
     }
 
     pub fn getSizeInMemory(self: BlockMap.Ptr) usize {
-        return requiredSizeForAllocation(self.base_map.slots.information.slot_count);
+        return requiredSizeForSelfAllocation(self.base_map.slots.information.slot_count);
     }
 
     pub fn getSizeForCloning(self: BlockMap.Ptr) usize {
-        return self.getSizeInMemory();
+        return requiredSizeForAllocation(self.base_map.block.get(), self.base_map.slots.information.slot_count);
     }
 
-    pub fn requiredSizeForAllocation(slot_count: u32) usize {
+    /// Return the size required for allocating just the map itself.
+    pub fn requiredSizeForSelfAllocation(slot_count: u32) usize {
         return @sizeOf(BlockMap) + slot_count * @sizeOf(Slot);
     }
+
+    pub fn requiredSizeForAllocation(bytecode_block: *bytecode.Block, slot_count: u32) usize {
+        var required_memory = requiredSizeForSelfAllocation(slot_count);
+        required_memory += ExecutableMap.requiredSizeForAllocation(bytecode_block);
+        return required_memory;
+    }
+
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(self: BlockMap.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr) ?MethodObject.Ptr {
+        return self.base_map.getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(vm, offset, receiver);
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: BlockMap.Ptr, vm: *VirtualMachine, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
+        self.base_map.writeIntoInlineCacheAtOffset(vm, offset, object, method);
+    }
 };
diff --git a/src/runtime/objects/executable_map.zig b/src/runtime/objects/executable_map.zig
index 4be64ec..1ee22ab 100644
--- a/src/runtime/objects/executable_map.zig
+++ b/src/runtime/objects/executable_map.zig
@@ -6,13 +6,20 @@ const std = @import("std");
 const Allocator = std.mem.Allocator;
 
 const Map = @import("map.zig").Map;
+const Heap = @import("../Heap.zig");
 const Value = value_import.Value;
+const Object = @import("../object.zig").Object;
 const MapType = @import("map.zig").MapType;
 const bytecode = @import("../bytecode.zig");
 const SlotsMap = @import("slots.zig").SlotsMap;
+const ArrayMap = @import("./array.zig").ArrayMap;
+const ArrayObject = @import("./array.zig").Array;
+const MethodObject = @import("./method.zig").Method;
 const value_import = @import("../value.zig");
 const PointerValue = value_import.PointerValue;
 const stage2_compat = @import("../../utility/stage2_compat.zig");
+const VirtualMachine = @import("../VirtualMachine.zig");
+const value_inspector = @import("../value_inspector.zig");
 const RefCountedValue = value_import.RefCountedValue;
 
 /// An "executable map" is one that contains a reference to executable code.
@@ -24,6 +31,11 @@ pub const ExecutableMap = extern struct {
     slots: SlotsMap align(@alignOf(u64)),
     /// The address of the bytecode block. Owned by definition_executable_ref.
     block: PointerValue(bytecode.Block) align(@alignOf(u64)),
+    /// An inline cache twice the length of `block`. The items form pairs of
+    /// [receiver map reference, method reference]. When the receiver map
+    /// reference matches what's in the cache, we directly use the method
+    /// instead of performing a lookup.
+    inline_cache: ArrayObject.Value,
     /// The executable which this map was created from.
     definition_executable_ref: RefCountedValue(bytecode.Executable) align(@alignOf(u64)),
 
@@ -35,13 +47,42 @@ pub const ExecutableMap = extern struct {
     };
 
     /// Refs `script`.
-    pub fn init(
+    pub fn allocateAndInit(
+        self: ExecutableMap.Ptr,
+        vm: *VirtualMachine,
+        token: *Heap.AllocationToken,
+        comptime map_type: MapType,
+        argument_slot_count: u8,
+        total_slot_count: u32,
+        block: *bytecode.Block,
+        executable: bytecode.Executable.Ref,
+    ) void {
+        const map_map = vm.getMapMap();
+
+        const inline_cache_size = block.getLength() * 2;
+        const inline_cache_map = ArrayMap.create(map_map, token, inline_cache_size);
+        // TODO: Use GlobalActorID!
+        const inline_cache = ArrayObject.createWithValues(token, 0, inline_cache_map, &.{}, vm.nil());
+
+        self.init(
+            map_type,
+            map_map,
+            argument_slot_count,
+            total_slot_count,
+            block,
+            ArrayObject.Value.init(inline_cache),
+            executable,
+        );
+    }
+
+    fn init(
         self: ExecutableMap.Ptr,
         comptime map_type: MapType,
         map_map: Map.Ptr,
         argument_slot_count: u8,
         total_slot_count: u32,
         block: *bytecode.Block,
+        inline_cache: ArrayObject.Value,
         executable: bytecode.Executable.Ref,
     ) void {
         std.debug.assert(argument_slot_count <= total_slot_count);
@@ -51,6 +92,7 @@ pub const ExecutableMap = extern struct {
 
         self.setArgumentSlotCount(argument_slot_count);
         self.block = PointerValue(bytecode.Block).init(block);
+        self.inline_cache = inline_cache;
         self.definition_executable_ref = RefCountedValue(bytecode.Executable).init(executable);
     }
 
@@ -68,4 +110,52 @@
     fn setArgumentSlotCount(self: ExecutableMap.Ptr, count: u8) void {
         @ptrCast(*ExecutableInformation, &self.slots.information.extra).argument_slot_count = count;
     }
+
+    pub fn requiredSizeForAllocation(block: *bytecode.Block) usize {
+        // Since we will be allocating an array as well as its map, we need to
+        // include both of those in our required size calculation.
+        var required_size = ArrayMap.requiredSizeForAllocation();
+        required_size += ArrayObject.requiredSizeForAllocation(block.getLength() * 2);
+        return required_size;
+    }
+
+    // --- Inline cache operations ---
+
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(
+        self: ExecutableMap.Ptr,
+        vm: *VirtualMachine,
+        offset: usize,
+        receiver: Object.Ptr,
+    ) ?MethodObject.Ptr {
+        const inline_cache = self.inline_cache.get();
+        std.debug.assert(offset < inline_cache.getSize() / 2);
+
+        const inline_cache_array = self.inline_cache.get().getValues();
+        if (inline_cache_array[offset * 2].data != receiver.map.data) {
+            // std.debug.print("MISS expected={} got={}\n", .{ vm.heap.asHeapAddress(receiver.map.asObject()), vm.heap.asHeapAddress(inline_cache_array[offset * 2].asObject()) });
+            inline_cache_array[offset * 2] = vm.nil();
+            inline_cache_array[(offset * 2) + 1] = vm.nil();
+            return null;
+        }
+
+        const method = inline_cache_array[(offset * 2) + 1].asObject().mustBeType(.Method);
+        // std.debug.print("HIT receiver.map={} method={} (\"{s}\")\n", .{ vm.heap.asHeapAddress(receiver.map.asObject()), vm.heap.asHeapAddress(method), method.getMap().method_name.asByteArray().getValues() });
+        // std.debug.print("    ", .{});
+        // value_inspector.inspectValue(.Inline, vm, receiver.asValue()) catch unreachable;
+        // std.debug.print("\n", .{});
+        return method;
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: ExecutableMap.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr, method: MethodObject.Ptr) void {
+        if (vm.getMapMap().asValue().data == receiver.map.data)
+            return;
+
+        const inline_cache = self.inline_cache.get();
+        std.debug.assert(offset < inline_cache.getSize() / 2);
+
+        const inline_cache_array = self.inline_cache.get().getValues();
+        inline_cache_array[offset * 2] = receiver.map;
+        inline_cache_array[(offset * 2) + 1] = method.asValue();
+        // std.debug.print("WRITE receiver.map={} method={} (\"{s}\")\n", .{ vm.heap.asHeapAddress(receiver.map.asObject()), vm.heap.asHeapAddress(method), method.getMap().method_name.asByteArray().getValues() });
+    }
 };
diff --git a/src/runtime/objects/method.zig b/src/runtime/objects/method.zig
index bd1fe47..b69f007 100644
--- a/src/runtime/objects/method.zig
+++ b/src/runtime/objects/method.zig
@@ -9,6 +9,7 @@ const Map = @import("map.zig").Map;
 const Heap = @import("../Heap.zig");
 const Slot = @import("../slot.zig").Slot;
 const slots = @import("slots.zig");
+const Object = @import("../object.zig").Object;
 const bytecode = @import("../bytecode.zig");
 const ByteArray = @import("../ByteArray.zig");
 const Activation = @import("../Activation.zig");
@@ -103,14 +104,14 @@
     ) !Method.Ptr {
         const toplevel_context_method_map = blk: {
             const toplevel_context_name = ByteArray.createFromString(token, toplevel_context_string);
-            break :blk try MethodMap.create(vm.getMapMap(), token, 0, 0, false, toplevel_context_name, block, executable);
+            break :blk try MethodMap.create(vm, token, 0, 0, false, toplevel_context_name, block, executable);
         };
 
         return create(token, vm.current_actor.id, toplevel_context_method_map, &.{});
     }
 
-    pub fn requiredSizeForCreatingTopLevelContext() usize {
+    pub fn requiredSizeForCreatingTopLevelContext(block: *bytecode.Block) usize {
         return ByteArray.requiredSizeForAllocation(toplevel_context_string.len) +
-            MethodMap.requiredSizeForAllocation(0) +
+            MethodMap.requiredSizeForAllocation(block, 0) +
             Method.requiredSizeForAllocation(0);
     }
@@ -177,7 +178,7 @@
     /// Borrows a ref for `script` from the caller. Takes ownership of
     /// `statements`.
     pub fn create(
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
         token: *Heap.AllocationToken,
         argument_slot_count: u8,
         total_slot_count: u32,
@@ -186,11 +187,11 @@ pub const MethodMap = extern struct {
         block: *bytecode.Block,
         executable: bytecode.Executable.Ref,
     ) !MethodMap.Ptr {
-        const size = MethodMap.requiredSizeForAllocation(total_slot_count);
+        const size = MethodMap.requiredSizeForSelfAllocation(total_slot_count);
         var memory_area = token.allocate(.Object, size);
         var self = @ptrCast(MethodMap.Ptr, memory_area);
 
-        self.init(map_map, argument_slot_count, total_slot_count, is_inline_method, method_name, block, executable);
+        self.init(vm, token, argument_slot_count, total_slot_count, is_inline_method, method_name, block, executable);
         try token.heap.markAddressAsNeedingFinalization(memory_area);
 
         return self;
@@ -198,7 +199,8 @@ pub const MethodMap = extern struct {
 
     fn init(
         self: MethodMap.Ptr,
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
+        token: *Heap.AllocationToken,
         argument_slot_count: u8,
         total_slot_count: u32,
         is_inline_method: bool,
@@ -206,7 +208,7 @@ pub const MethodMap = extern struct {
         block: *bytecode.Block,
         executable: bytecode.Executable.Ref,
     ) void {
-        self.base_map.init(.Method, map_map, argument_slot_count, total_slot_count, block, executable);
+        self.base_map.allocateAndInit(vm, token, .Method, argument_slot_count, total_slot_count, block, executable);
         self.setInlineMethod(is_inline_method);
         self.method_name = method_name.asValue();
     }
@@ -238,7 +240,7 @@ pub const MethodMap = extern struct {
 
     pub fn clone(self: MethodMap.Ptr, vm: *VirtualMachine, token: *Heap.AllocationToken) !MethodMap.Ptr {
         const new_map = try create(
-            vm.getMapMap(),
+            vm,
             token,
             self.getArgumentSlotCount(),
             self.base_map.slots.information.slot_count,
@@ -255,14 +257,29 @@ pub const MethodMap = extern struct {
     }
 
     pub fn getSizeInMemory(self: MethodMap.Ptr) usize {
-        return requiredSizeForAllocation(self.base_map.slots.information.slot_count);
+        return requiredSizeForSelfAllocation(self.base_map.slots.information.slot_count);
     }
 
     pub fn getSizeForCloning(self: MethodMap.Ptr) usize {
-        return self.getSizeInMemory();
+        return requiredSizeForAllocation(self.base_map.block.get(), self.base_map.slots.information.slot_count);
     }
 
-    pub fn requiredSizeForAllocation(slot_count: u32) usize {
+    /// Return the size required for allocating just the map itself.
+    pub fn requiredSizeForSelfAllocation(slot_count: u32) usize {
         return @sizeOf(MethodMap) + slot_count * @sizeOf(Slot);
     }
+
+    pub fn requiredSizeForAllocation(bytecode_block: *bytecode.Block, slot_count: u32) usize {
+        var required_memory = requiredSizeForSelfAllocation(slot_count);
+        required_memory += ExecutableMap.requiredSizeForAllocation(bytecode_block);
+        return required_memory;
+    }
+
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(self: MethodMap.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr) ?Method.Ptr {
+        return self.base_map.getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(vm, offset, receiver);
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: MethodMap.Ptr, vm: *VirtualMachine, offset: usize, object: Object.Ptr, method: Method.Ptr) void {
+        self.base_map.writeIntoInlineCacheAtOffset(vm, offset, object, method);
+    }
 };
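
Note on the mechanism (not part of the patch): the inline cache added to ExecutableMap is a flat array twice the bytecode block's length holding a [receiver map, method] pair per instruction offset. sendMessage consults the pair for the current program counter, uses the cached method when the receiver's map matches, and otherwise clears the pair, falls back to a full lookup, and writes the result back. The self-contained Zig sketch below models that miss/write/hit/invalidate cycle; it uses stand-in Map/Method types and a struct-per-entry layout rather than the runtime's interleaved value array, so the names and shapes are illustrative assumptions, not the project's actual API.

const std = @import("std");

// Stand-in types; the runtime caches map and method object pointers instead.
const Map = struct { id: u32 };
const Method = struct { name: []const u8 };

const CacheEntry = struct {
    receiver_map: ?*const Map = null,
    method: ?*const Method = null,
};

// One entry per send-site offset, modeling the pair layout described above.
const InlineCache = struct {
    entries: []CacheEntry,

    // Hit: the cached map matches the receiver's map. Miss: clear the entry and
    // return null so the caller performs a normal lookup.
    fn getOrInvalidate(self: InlineCache, offset: usize, receiver_map: *const Map) ?*const Method {
        const entry = &self.entries[offset];
        if (entry.receiver_map) |cached_map| {
            if (cached_map == receiver_map) return entry.method;
        }
        entry.* = .{};
        return null;
    }

    // Called after a successful lookup to fill the entry for this send site.
    fn write(self: InlineCache, offset: usize, receiver_map: *const Map, method: *const Method) void {
        self.entries[offset] = .{ .receiver_map = receiver_map, .method = method };
    }
};

test "inline cache miss, write, hit, and invalidation" {
    var backing = [_]CacheEntry{.{}} ** 4;
    const cache = InlineCache{ .entries = backing[0..] };

    const point_map = Map{ .id = 1 };
    const add_method = Method{ .name = "add" };

    // Cold entry: miss; the interpreter would do a full lookup and cache the result.
    try std.testing.expect(cache.getOrInvalidate(2, &point_map) == null);
    cache.write(2, &point_map, &add_method);

    // Same receiver map at the same send site: hit, no lookup needed.
    try std.testing.expect(cache.getOrInvalidate(2, &point_map).? == &add_method);

    // A receiver with a different map invalidates the entry.
    const other_map = Map{ .id = 2 };
    try std.testing.expect(cache.getOrInvalidate(2, &other_map) == null);
    try std.testing.expect(cache.getOrInvalidate(2, &point_map) == null);
}

Keying the entry on the receiver's map rather than the receiver itself is what lets every object sharing a map reuse the same cached method.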