@@ -59,38 +59,6 @@ pub fn outputShndx(self: Atom) ?u16 {
     return self.output_section_index;
 }
 
-pub fn codeInObject(self: Atom, elf_file: *Elf) error{Overflow}![]const u8 {
-    const object = self.file(elf_file).?.object;
-    return object.shdrContents(self.input_section_index);
-}
-
-/// Returns atom's code and optionally uncompresses data if required (for compressed sections).
-/// Caller owns the memory.
-pub fn codeInObjectUncompressAlloc(self: Atom, elf_file: *Elf) ![]u8 {
-    const gpa = elf_file.base.allocator;
-    const data = try self.codeInObject(elf_file);
-    const shdr = self.inputShdr(elf_file);
-    if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
-        const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
-        switch (chdr.ch_type) {
-            .ZLIB => {
-                var stream = std.io.fixedBufferStream(data[@sizeOf(elf.Elf64_Chdr)..]);
-                var zlib_stream = std.compress.zlib.decompressStream(gpa, stream.reader()) catch
-                    return error.InputOutput;
-                defer zlib_stream.deinit();
-                const size = std.math.cast(usize, chdr.ch_size) orelse return error.Overflow;
-                const decomp = try gpa.alloc(u8, size);
-                const nread = zlib_stream.reader().readAll(decomp) catch return error.InputOutput;
-                if (nread != decomp.len) {
-                    return error.InputOutput;
-                }
-                return decomp;
-            },
-            else => @panic("TODO unhandled compression scheme"),
-        }
-    } else return gpa.dupe(u8, data);
-}
-
 pub fn priority(self: Atom, elf_file: *Elf) u64 {
     const index = self.file(elf_file).?.index();
     return (@as(u64, @intCast(index)) << 32) | @as(u64, @intCast(self.input_section_index));
@@ -327,7 +295,15 @@ pub fn freeRelocs(self: Atom, elf_file: *Elf) void {
     zig_module.relocs.items[self.relocs_section_index].clearRetainingCapacity();
 }
 
-pub fn scanRelocs(self: Atom, elf_file: *Elf, undefs: anytype) !void {
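+/// Whether `scanRelocs` needs this atom's code up front: currently only R_X86_64_GOTTPOFF
+/// requires it, since the relaxation decision inspects the instruction bytes.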
+pub fn scanRelocsRequiresCode(self: Atom, elf_file: *Elf) error{Overflow}!bool {
+    for (try self.relocs(elf_file)) |rel| {
+        if (rel.r_type() == elf.R_X86_64_GOTTPOFF) return true;
+    }
+    return false;
+}
+
+pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype) !void {
+    const is_dyn_lib = elf_file.isDynLib();
     const file_ptr = self.file(elf_file).?;
     const rels = try self.relocs(elf_file);
     var i: usize = 0;
@@ -336,6 +312,8 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, undefs: anytype) !void {
 
         if (rel.r_type() == elf.R_X86_64_NONE) continue;
 
+        const r_offset = std.math.cast(usize, rel.r_offset) orelse return error.Overflow;
+
         const symbol_index = switch (file_ptr) {
             .zig_module => |x| x.symbol(rel.r_sym()),
             .object => |x| x.symbols.items[rel.r_sym()],
@@ -388,7 +366,54 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, undefs: anytype) !void {
 
             elf.R_X86_64_PC32 => {},
 
-            else => @panic("TODO"),
+            elf.R_X86_64_TPOFF32,
+            elf.R_X86_64_TPOFF64,
+            => {
+                if (is_dyn_lib) {
+                    // TODO
+                    // self.picError(symbol, rel, elf_file);
+                }
+            },
+
+            elf.R_X86_64_TLSGD => {
+                // TODO verify followed by appropriate relocation such as PLT32 __tls_get_addr
+
+                if (elf_file.isStatic() or
+                    (!symbol.flags.import and !is_dyn_lib))
+                {
+                    // Relax if building with -static flag as __tls_get_addr() will not be present in libc.a
+                    // We skip the next relocation.
+                    i += 1;
+                } else if (!symbol.flags.import and is_dyn_lib) {
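+                    // Symbol is local to this shared object, so the General Dynamic sequence
+                    // can be relaxed to Initial Exec; the paired relocation is consumed as well.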
+                    symbol.flags.needs_gottp = true;
+                    i += 1;
+                } else {
+                    symbol.flags.needs_tlsgd = true;
+                }
+            },
+
+            elf.R_X86_64_GOTTPOFF => {
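+                // The relocation patches the 4-byte displacement of `movq x@gottpoff(%rip), %reg`
+                // (REX + opcode + ModRM + disp32), so the instruction starts 3 bytes before r_offset.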
+                const should_relax = blk: {
+                    // if (!elf_file.options.relax or is_shared or symbol.flags.import) break :blk false;
+                    if (!x86_64.canRelaxGotTpOff(code.?[r_offset - 3 ..])) break :blk false;
+                    break :blk true;
+                };
+                if (!should_relax) {
+                    symbol.flags.needs_gottp = true;
+                }
+            },
+
+            else => {
+                var err = try elf_file.addErrorWithNotes(1);
+                try err.addMsg(elf_file, "fatal linker error: unhandled relocation type {}", .{
+                    fmtRelocType(rel.r_type()),
+                });
+                try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
+                    self.file(elf_file).?.fmtPath(),
+                    self.name(elf_file),
+                    r_offset,
+                });
+            },
         }
     }
 }
@@ -430,7 +455,10 @@ pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
     var stream = std.io.fixedBufferStream(code);
     const cwriter = stream.writer();
 
-    for (try self.relocs(elf_file)) |rel| {
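+    // Iterate by index so that paired relocations (such as the pair consumed by the
+    // R_X86_64_TLSGD relaxation below) can be skipped together.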
+    const rels = try self.relocs(elf_file);
+    var i: usize = 0;
+    while (i < rels.len) : (i += 1) {
+        const rel = rels[i];
         const r_type = rel.r_type();
         if (r_type == elf.R_X86_64_NONE) continue;
 
@@ -463,9 +491,9 @@ pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
         // Relative offset to the start of the global offset table.
         const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
         // // Address of the thread pointer.
-        // const TP = @as(i64, @intCast(elf_file.getTpAddress()));
+        const TP = @as(i64, @intCast(elf_file.tpAddress()));
         // // Address of the dynamic thread pointer.
-        // const DTP = @as(i64, @intCast(elf_file.getDtpAddress()));
+        // const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
 
         relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ({s})", .{
             fmtRelocType(r_type),
@@ -512,10 +540,43 @@ pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void {
                 try cwriter.writeIntLittle(i32, @as(i32, @intCast(G + GOT + A - P)));
             },
 
-            else => {
-                log.err("TODO: unhandled relocation type {}", .{fmtRelocType(rel.r_type())});
-                @panic("TODO unhandled relocation type");
+            elf.R_X86_64_TPOFF32 => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A - TP))),
+            elf.R_X86_64_TPOFF64 => try cwriter.writeIntLittle(i64, S + A - TP),
+
+            elf.R_X86_64_TLSGD => {
+                if (target.flags.has_tlsgd) {
+                    // TODO
+                    // const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
+                    // try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
+                } else if (target.flags.has_gottp) {
+                    // TODO
+                    // const S_ = @as(i64, @intCast(target.getGotTpAddress(elf_file)));
+                    // try relaxTlsGdToIe(relocs[i .. i + 2], @intCast(S_ - P), elf_file, &stream);
+                    i += 1;
+                } else {
+                    try x86_64.relaxTlsGdToLe(
+                        self,
+                        rels[i .. i + 2],
+                        @as(i32, @intCast(S - TP)),
+                        elf_file,
+                        &stream,
+                    );
+                    i += 1;
+                }
+            },
+
+            elf.R_X86_64_GOTTPOFF => {
+                if (target.flags.has_gottp) {
+                    // TODO
+                    // const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
+                    // try cwriter.writeIntLittle(i32, @as(i32, @intCast(S_ + A - P)));
+                } else {
+                    x86_64.relaxGotTpOff(code[r_offset - 3 ..]) catch unreachable;
+                    try cwriter.writeIntLittle(i32, @as(i32, @intCast(S - TP)));
+                }
             },
+
+            else => {},
         }
     }
 }
@@ -681,6 +742,80 @@ const x86_64 = struct {
         }
     }
 
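+    /// Checks, by re-encoding the instruction with a dummy 32-bit immediate, whether the
+    /// GOT-indirect TLS load `movq x@gottpoff(%rip), %reg` can be rewritten as `movq $imm32, %reg`.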
+    pub fn canRelaxGotTpOff(code: []const u8) bool {
+        const old_inst = disassemble(code) orelse return false;
+        switch (old_inst.encoding.mnemonic) {
+            .mov => if (Instruction.new(old_inst.prefix, .mov, &.{
+                old_inst.ops[0],
+                // TODO: hack to force imm32s in the assembler
+                .{ .imm = Immediate.s(-129) },
+            })) |inst| {
+                inst.encode(std.io.null_writer, .{}) catch return false;
+                return true;
+            } else |_| return false,
+            else => return false,
+        }
+    }
+
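+    /// Rewrites `movq x@gottpoff(%rip), %reg` into `movq $imm32, %reg` in place; the caller
+    /// then patches the immediate (with S - TP in resolveRelocs above).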
+    pub fn relaxGotTpOff(code: []u8) !void {
+        const old_inst = disassemble(code) orelse return error.RelaxFail;
+        switch (old_inst.encoding.mnemonic) {
+            .mov => {
+                const inst = try Instruction.new(old_inst.prefix, .mov, &.{
+                    old_inst.ops[0],
+                    // TODO: hack to force imm32s in the assembler
+                    .{ .imm = Immediate.s(-129) },
+                });
+                relocs_log.debug(" relaxing {} => {}", .{ old_inst.encoding, inst.encoding });
+                encode(&.{inst}, code) catch return error.RelaxFail;
+            },
+            else => return error.RelaxFail,
+        }
+    }
+
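+    /// Rewrites the two-instruction General Dynamic sequence into
+    /// `movq %fs:0, %rax; add $tp_offset, %rax`, patching in `value`; the caller skips the
+    /// second (paired) relocation.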
+    pub fn relaxTlsGdToLe(
+        self: Atom,
+        rels: []align(1) const elf.Elf64_Rela,
+        value: i32,
+        elf_file: *Elf,
+        stream: anytype,
+    ) !void {
+        assert(rels.len == 2);
+        const writer = stream.writer();
+        switch (rels[1].r_type()) {
+            elf.R_X86_64_PC32,
+            elf.R_X86_64_PLT32,
+            elf.R_X86_64_GOTPCREL,
+            elf.R_X86_64_GOTPCRELX,
+            => {
+                var insts = [_]u8{
+                    0x64, 0x48, 0x8b, 0x04, 0x25, 0, 0, 0, 0, // movq %fs:0,%rax
+                    0x48, 0x81, 0xc0, 0, 0, 0, 0, // add $tp_offset, %rax
+                };
+                std.mem.writeIntLittle(i32, insts[12..][0..4], value);
+                try stream.seekBy(-4);
+                try writer.writeAll(&insts);
+                relocs_log.debug(" relaxing {} and {}", .{
+                    fmtRelocType(rels[0].r_type()),
+                    fmtRelocType(rels[1].r_type()),
+                });
+            },
+
+            else => {
+                var err = try elf_file.addErrorWithNotes(1);
+                try err.addMsg(elf_file, "fatal linker error: rewrite {} when followed by {}", .{
+                    fmtRelocType(rels[0].r_type()),
+                    fmtRelocType(rels[1].r_type()),
+                });
+                try err.addNote(elf_file, "in {}:{s} at offset 0x{x}", .{
+                    self.file(elf_file).?.fmtPath(),
+                    self.name(elf_file),
+                    rels[0].r_offset,
+                });
+            },
+        }
+    }
+
     fn disassemble(code: []const u8) ?Instruction {
         var disas = Disassembler.init(code);
         const inst = disas.next() catch return null;