@@ -25,10 +25,9 @@ relocs_section_index: u32 = 0,
 /// Index of this atom in the linker's atoms table.
 atom_index: Index = 0,
 
-/// Points to the previous and next neighbors, based on the `text_offset`.
-/// This can be used to find, for example, the capacity of this `TextBlock`.
-prev_index: Index = 0,
-next_index: Index = 0,
+/// Points to the previous and next neighbors.
+prev_atom_ref: Elf.Ref = .{},
+next_atom_ref: Elf.Ref = .{},
 
 /// Specifies whether this atom is alive or has been garbage collected.
 alive: bool = true,
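
The `.{}` defaults above imply that `Elf.Ref` is a small struct with a null state. A minimal sketch of the shape this commit relies on, reconstructed only from its uses in this diff (`.index`/`.file` initializers in `ref()` below, `eql()` in `free()`); the real definition in Elf.zig may carry more:

pub const Ref = struct {
    index: u32 = 0,
    file: u32 = 0,

    // free() below compares refs; field-wise equality is the obvious shape.
    pub fn eql(ref: Ref, other: Ref) bool {
        return ref.index == other.index and ref.file == other.file;
    }
};
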
@@ -48,10 +47,22 @@ pub fn name(self: Atom, elf_file: *Elf) [:0]const u8 {
 }
 
 pub fn address(self: Atom, elf_file: *Elf) i64 {
-    const shdr = elf_file.shdrs.items[self.output_section_index];
+    const shdr = elf_file.sections.items(.shdr)[self.output_section_index];
     return @as(i64, @intCast(shdr.sh_addr)) + self.value;
 }
 
+pub fn ref(self: Atom) Elf.Ref {
+    return .{ .index = self.atom_index, .file = self.file_index };
+}
+
+pub fn prevAtom(self: Atom, elf_file: *Elf) ?*Atom {
+    return elf_file.atom(self.prev_atom_ref);
+}
+
+pub fn nextAtom(self: Atom, elf_file: *Elf) ?*Atom {
+    return elf_file.atom(self.next_atom_ref);
+}
+
 pub fn debugTombstoneValue(self: Atom, target: Symbol, elf_file: *Elf) ?u64 {
     if (target.mergeSubsection(elf_file)) |msub| {
         if (msub.alive) return null;
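
For context, resolving a ref plausibly goes through the owning file. A hedged sketch of what `Elf.atom` might look like, assuming a `file()` accessor and a per-file `atom()` lookup (neither is shown in this diff):

pub fn atom(self: *Elf, ref: Ref) ?*Atom {
    // Both calls below are assumptions about Elf.zig's API, not code from
    // this commit: resolve the owning file, then the atom within it.
    const file_ptr = self.file(ref.file) orelse return null;
    return file_ptr.atom(ref.index);
}

With these accessors, walking a section's atoms in address order is just a chase along `next_atom_ref`, e.g. `while (it) |a| : (it = a.nextAtom(elf_file)) { ... }`.
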
@@ -95,18 +106,16 @@ pub fn priority(self: Atom, elf_file: *Elf) u64 {
 /// File offset relocation happens transparently, so it is not included in
 /// this calculation.
 pub fn capacity(self: Atom, elf_file: *Elf) u64 {
-    const zo = elf_file.zigObjectPtr().?;
-    const next_addr = if (zo.atom(self.next_index)) |next|
-        next.address(elf_file)
+    const next_addr = if (self.nextAtom(elf_file)) |next_atom|
+        next_atom.address(elf_file)
     else
         std.math.maxInt(u32);
     return @intCast(next_addr - self.address(elf_file));
 }
 
 pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
-    const zo = elf_file.zigObjectPtr().?;
     // No need to keep a free list node for the last block.
-    const next = zo.atom(self.next_index) orelse return false;
+    const next = self.nextAtom(elf_file) orelse return false;
     const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file));
     const ideal_cap = Elf.padToIdeal(self.size);
     if (cap <= ideal_cap) return false;
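
A worked example of the capacity heuristic above, assuming `Elf.padToIdeal` pads by roughly one third (an assumption; the factor is defined elsewhere in the linker):

// Illustrative numbers only.
const size: u64 = 300; // self.size
const cap: u64 = 500; // next.address - self.address
const ideal_cap: u64 = size + size / 3; // padToIdeal(300) == 400 under this assumption
// cap > ideal_cap, so the early return above does not fire; the surplus
// (cap - ideal_cap == 100) is presumably what the rest of the function
// weighs against Elf.min_text_capacity, the same threshold allocate()
// checks below before keeping a free-list node.
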
@@ -115,28 +124,27 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
 }
 
 pub fn allocate(self: *Atom, elf_file: *Elf) !void {
-    const zo = elf_file.zigObjectPtr().?;
-    const shdr = &elf_file.shdrs.items[self.output_section_index];
-    const meta = elf_file.last_atom_and_free_list_table.getPtr(self.output_section_index).?;
-    const free_list = &meta.free_list;
-    const last_atom_index = &meta.last_atom_index;
+    const slice = elf_file.sections.slice();
+    const shdr = &slice.items(.shdr)[self.output_section_index];
+    const free_list = &slice.items(.free_list)[self.output_section_index];
+    const last_atom_ref = &slice.items(.last_atom)[self.output_section_index];
     const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
 
     // We use these to indicate our intention to update metadata, placing the new atom,
     // and possibly removing a free list node.
     // It would be simpler to do it inside the for loop below, but that would cause a
     // problem if an error was returned later in the function. So this action
     // is actually carried out at the end of the function, when errors are no longer possible.
-    var atom_placement: ?Atom.Index = null;
+    var atom_placement: ?Elf.Ref = null;
     var free_list_removal: ?usize = null;
 
     // First we look for an appropriately sized free list node.
     // The list is unordered. We'll just take the first thing that works.
     self.value = blk: {
         var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len;
         while (i < free_list.items.len) {
-            const big_atom_index = free_list.items[i];
-            const big_atom = zo.atom(big_atom_index).?;
+            const big_atom_ref = free_list.items[i];
+            const big_atom = elf_file.atom(big_atom_ref).?;
             // We now have a pointer to a live atom that has too much capacity.
             // Is it enough that we could fit this new atom?
             const cap = big_atom.capacity(elf_file);
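
The `sections.slice()`/`items(.field)` calls above are the `std.MultiArrayList` access pattern: this commit moves `free_list` and `last_atom` out of the old `last_atom_and_free_list_table` hash map into per-section columns. A hypothetical sketch of the element type those accesses imply (field names taken from the diff; the real struct surely carries more fields):

const Section = struct {
    shdr: elf.Elf64_Shdr,
    last_atom: Elf.Ref = .{},
    free_list: std.ArrayListUnmanaged(Elf.Ref) = .{},
};

// With `sections: std.MultiArrayList(Section)`, one slice() call exposes
// each field as its own contiguous column:
const slice = elf_file.sections.slice();
const shdr = &slice.items(.shdr)[self.output_section_index];

A plain index per section replaces the hash-map `getPtr(...).?` lookup.
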
@@ -163,72 +171,74 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
             const keep_free_list_node = remaining_capacity >= Elf.min_text_capacity;
 
             // Set up the metadata to be updated, after errors are no longer possible.
-            atom_placement = big_atom_index;
+            atom_placement = big_atom_ref;
             if (!keep_free_list_node) {
                 free_list_removal = i;
             }
             break :blk @intCast(new_start_vaddr);
-        } else if (zo.atom(last_atom_index.*)) |last| {
-            const ideal_capacity = Elf.padToIdeal(last.size);
-            const ideal_capacity_end_vaddr = @as(u64, @intCast(last.value)) + ideal_capacity;
+        } else if (elf_file.atom(last_atom_ref.*)) |last_atom| {
+            const ideal_capacity = Elf.padToIdeal(last_atom.size);
+            const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
             const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
             // Set up the metadata to be updated, after errors are no longer possible.
-            atom_placement = last.atom_index;
+            atom_placement = last_atom.ref();
             break :blk @intCast(new_start_vaddr);
         } else {
             break :blk 0;
         }
     };
 
-    log.debug("allocated atom({d}) : '{s}' at 0x{x} to 0x{x}", .{
-        self.atom_index,
+    log.debug("allocated atom({}) : '{s}' at 0x{x} to 0x{x}", .{
+        self.ref(),
         self.name(elf_file),
         self.address(elf_file),
         self.address(elf_file) + @as(i64, @intCast(self.size)),
     });
 
-    const expand_section = if (atom_placement) |placement_index|
-        zo.atom(placement_index).?.next_index == 0
+    const expand_section = if (atom_placement) |placement_ref|
+        elf_file.atom(placement_ref).?.nextAtom(elf_file) == null
     else
         true;
     if (expand_section) {
         const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
         try elf_file.growAllocSection(self.output_section_index, needed_size);
-        last_atom_index.* = self.atom_index;
-
-        const zig_object = elf_file.zigObjectPtr().?;
-        if (zig_object.dwarf) |_| {
-            // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
-            // range of the compilation unit. When we expand the text section, this range changes,
-            // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
-            zig_object.debug_info_section_dirty = true;
-            // This becomes dirty for the same reason. We could potentially make this more
-            // fine-grained with the addition of support for more compilation units. It is planned to
-            // model each package as a different compilation unit.
-            zig_object.debug_aranges_section_dirty = true;
-            zig_object.debug_rnglists_section_dirty = true;
+        last_atom_ref.* = self.ref();
+
+        switch (self.file(elf_file).?) {
+            .zig_object => |zo| if (zo.dwarf) |_| {
+                // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
+                // range of the compilation unit. When we expand the text section, this range changes,
+                // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
+                zo.debug_info_section_dirty = true;
+                // This becomes dirty for the same reason. We could potentially make this more
+                // fine-grained with the addition of support for more compilation units. It is planned to
+                // model each package as a different compilation unit.
+                zo.debug_aranges_section_dirty = true;
+                zo.debug_rnglists_section_dirty = true;
+            },
+            else => {},
         }
     }
     shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);
 
     // This function can also reallocate an atom.
     // In this case we need to "unplug" it from its previous location before
     // plugging it in to its new location.
-    if (zo.atom(self.prev_index)) |prev| {
-        prev.next_index = self.next_index;
+    if (self.prevAtom(elf_file)) |prev| {
+        prev.next_atom_ref = self.next_atom_ref;
     }
-    if (zo.atom(self.next_index)) |next| {
-        next.prev_index = self.prev_index;
+    if (self.nextAtom(elf_file)) |next| {
+        next.prev_atom_ref = self.prev_atom_ref;
     }
 
-    if (atom_placement) |big_atom_index| {
-        const big_atom = zo.atom(big_atom_index).?;
-        self.prev_index = big_atom_index;
-        self.next_index = big_atom.next_index;
-        big_atom.next_index = self.atom_index;
+    if (atom_placement) |big_atom_ref| {
+        const big_atom = elf_file.atom(big_atom_ref).?;
+        self.prev_atom_ref = big_atom_ref;
+        self.next_atom_ref = big_atom.next_atom_ref;
+        big_atom.next_atom_ref = self.ref();
     } else {
-        self.prev_index = 0;
-        self.next_index = 0;
+        self.prev_atom_ref = .{ .index = 0, .file = 0 };
+        self.next_atom_ref = .{ .index = 0, .file = 0 };
     }
     if (free_list_removal) |i| {
         _ = free_list.swapRemove(i);
@@ -248,64 +258,70 @@ pub fn grow(self: *Atom, elf_file: *Elf) !void {
 }
 
 pub fn free(self: *Atom, elf_file: *Elf) void {
-    log.debug("freeAtom {d} ({s})", .{ self.atom_index, self.name(elf_file) });
+    log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) });
 
-    const zo = elf_file.zigObjectPtr().?;
     const comp = elf_file.base.comp;
     const gpa = comp.gpa;
     const shndx = self.output_section_index;
-    const meta = elf_file.last_atom_and_free_list_table.getPtr(shndx).?;
-    const free_list = &meta.free_list;
-    const last_atom_index = &meta.last_atom_index;
+    const slice = elf_file.sections.slice();
+    const free_list = &slice.items(.free_list)[shndx];
+    const last_atom_ref = &slice.items(.last_atom)[shndx];
     var already_have_free_list_node = false;
     {
         var i: usize = 0;
         // TODO turn free_list into a hash map
         while (i < free_list.items.len) {
-            if (free_list.items[i] == self.atom_index) {
+            if (free_list.items[i].eql(self.ref())) {
                 _ = free_list.swapRemove(i);
                 continue;
             }
-            if (free_list.items[i] == self.prev_index) {
-                already_have_free_list_node = true;
+            if (self.prevAtom(elf_file)) |prev_atom| {
+                if (free_list.items[i].eql(prev_atom.ref())) {
+                    already_have_free_list_node = true;
+                }
             }
             i += 1;
         }
     }
 
-    if (zo.atom(last_atom_index.*)) |last_atom| {
-        if (last_atom.atom_index == self.atom_index) {
-            if (zo.atom(self.prev_index)) |_| {
+    if (elf_file.atom(last_atom_ref.*)) |last_atom| {
+        if (last_atom.ref().eql(self.ref())) {
+            if (self.prevAtom(elf_file)) |prev_atom| {
                 // TODO shrink the section size here
-                last_atom_index.* = self.prev_index;
+                last_atom_ref.* = prev_atom.ref();
             } else {
-                last_atom_index.* = 0;
+                last_atom_ref.* = .{};
             }
         }
     }
 
-    if (zo.atom(self.prev_index)) |prev| {
-        prev.next_index = self.next_index;
-        if (!already_have_free_list_node and prev.*.freeListEligible(elf_file)) {
+    if (self.prevAtom(elf_file)) |prev_atom| {
+        prev_atom.next_atom_ref = self.next_atom_ref;
+        if (!already_have_free_list_node and prev_atom.*.freeListEligible(elf_file)) {
             // The free list is heuristics, it doesn't have to be perfect, so we can
             // ignore the OOM here.
-            free_list.append(gpa, prev.atom_index) catch {};
+            free_list.append(gpa, prev_atom.ref()) catch {};
         }
     } else {
-        self.prev_index = 0;
+        self.prev_atom_ref = .{};
     }
 
-    if (zo.atom(self.next_index)) |next| {
-        next.prev_index = self.prev_index;
+    if (self.nextAtom(elf_file)) |next_atom| {
+        next_atom.prev_atom_ref = self.prev_atom_ref;
     } else {
-        self.next_index = 0;
+        self.next_atom_ref = .{};
    }
 
-    // TODO create relocs free list
-    self.freeRelocs(zo);
-    // TODO figure out how to free input section mappind in ZigModule
-    // const zig_object = elf_file.zigObjectPtr().?
-    // assert(zig_object.atoms.swapRemove(self.atom_index));
+    switch (self.file(elf_file).?) {
+        .zig_object => |zo| {
+            // TODO create relocs free list
+            self.freeRelocs(zo);
+            // TODO figure out how to free input section mappind in ZigModule
+            // const zig_object = elf_file.zigObjectPtr().?
+            // assert(zig_object.atoms.swapRemove(self.atom_index));
+        },
+        else => {},
+    }
     self.* = .{};
 }
 
@@ -336,10 +352,7 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
         switch (target.type(elf_file)) {
             elf.STT_SECTION => {
                 r_addend += @intCast(target.address(.{}, elf_file));
-                r_sym = if (target.outputShndx(elf_file)) |osec|
-                    elf_file.sectionSymbolOutputSymtabIndex(osec)
-                else
-                    0;
+                r_sym = target.outputShndx(elf_file) orelse 0;
             },
             else => {
                 r_sym = target.outputSymtabIndex(elf_file) orelse 0;
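
For reference (standard ELF64 relocation packing, not code from this commit): the `r_sym` computed in this switch occupies the upper 32 bits of the entry's `r_info`, so for `STT_SECTION` targets the relocation now carries the output section index directly:

// Standard ELF64 packing of symbol index and relocation type.
const r_info = (@as(u64, r_sym) << 32) | r_type;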