src/codegen.zig: 170 changes (154 additions, 16 deletions)
@@ -570,6 +570,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.dbgSetEpilogueBegin();
}
},
.arm => {
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
// push {fp, lr}
// mov fp, sp
// sub sp, sp, #reloc
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.push(.al, .{ .fp, .lr }).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .fp, Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none)).toU32());
// TODO: prepare stack for local variables
// const backpatch_reloc = try self.code.addManyAsArray(4);

try self.dbgSetPrologueEnd();

try self.genBody(self.mod_fn.analysis.success);

// Backpatch stack offset
// const stack_end = self.max_end_stack;
// const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);
// mem.writeIntLittle(u32, backpatch_reloc, Instruction.sub(.al, .sp, .sp, Instruction.Operand.imm()));

try self.dbgSetEpilogueBegin();

// mov sp, fp
// pop {fp, pc}
// TODO: return by jumping to this code, use relocations
// mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .sp, Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none)).toU32());
// mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.pop(.al, .{ .fp, .pc }).toU32());
} else {
try self.dbgSetPrologueEnd();
try self.genBody(self.mod_fn.analysis.success);
try self.dbgSetEpilogueBegin();
}
},
else => {
try self.dbgSetPrologueEnd();
try self.genBody(self.mod_fn.analysis.success);
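For reference, a sketch of how the commented-out stack backpatch in the ARM prologue above could be completed; it uses only the `Instruction.sub`, `Operand.imm(value, rotate)` and `mem.alignForward` helpers already referenced in the comments, and assumes for simplicity that the frame size fits in an unrotated 8-bit immediate:

```zig
// Prologue: reserve room for one instruction and patch it later.
const backpatch_reloc = try self.code.addManyAsArray(4);

// ... after genBody, once the final stack usage is known ...
const stack_end = self.max_end_stack;
const aligned_stack_end = mem.alignForward(stack_end, self.stack_align);

// sub sp, sp, #aligned_stack_end (valid only while the size fits in 8 bits;
// larger frames would need a rotated immediate or a scratch register)
mem.writeIntLittle(u32, backpatch_reloc, Instruction.sub(
    .al,
    .sp,
    .sp,
    Instruction.Operand.imm(@intCast(u8, aligned_stack_end), 0),
).toU32());
```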
@@ -1461,7 +1494,35 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.arm => {
if (info.args.len > 0) return self.fail(inst.base.src, "TODO implement fn args for {}", .{self.target.cpu.arch});
for (info.args) |mc_arg, arg_i| {
const arg = inst.args[arg_i];
const arg_mcv = try self.resolveInst(inst.args[arg_i]);

switch (mc_arg) {
.none => continue,
.undef => unreachable,
.immediate => unreachable,
.unreach => unreachable,
.dead => unreachable,
.embedded_in_code => unreachable,
.memory => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
.register => |reg| {
try self.genSetReg(arg.src, reg, arg_mcv);
// TODO interact with the register allocator to mark the instruction as moved.
},
.stack_offset => {
return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
},
.ptr_stack_offset => {
return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
},
.ptr_embedded_in_code => {
return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
},
}
}

if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
@@ -1476,13 +1537,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
else
unreachable;

// TODO only works with leaf functions
// at the moment, which works fine for
// Hello World, but not for real code
// of course. Add pushing lr to stack
// and popping after call
try self.genSetReg(inst.base.src, .lr, .{ .memory = got_addr });
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.blx(.al, .lr).toU32());

// TODO: add Instruction.supportedOn
// function for ARM
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.blx(.al, .lr).toU32());
} else {
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .lr, Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32());
}
} else {
return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
}
@@ -1601,7 +1665,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.zero, 0, .ra).toU32());
},
.arm => {
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, .sp, Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.pop(.al, .{ .fp, .pc }).toU32());
// TODO: jump to the end with relocation
// // Just add space for an instruction, patch this later
// try self.code.resize(self.code.items.len + 4);
// try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4);
},
else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}),
}
@@ -2213,14 +2282,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// least amount of necessary instructions (use
// more intelligent rotating)
if (x <= math.maxInt(u8)) {
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, 0, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
return;
} else if (x <= math.maxInt(u16)) {
// TODO Use movw Note: Not supported on
// all ARM targets!

mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, 0, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
} else if (x <= math.maxInt(u32)) {
// TODO Use movw and movt Note: Not
// supported on all ARM targets! Also TODO
@@ -2232,20 +2301,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// orr reg, reg, #0xbb, 24
// orr reg, reg, #0xcc, 16
// orr reg, reg, #0xdd, 8
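// (The second argument to Operand.imm below is the ARM rotate field: the
// 8-bit value is rotated right by twice that amount, so rotates of 12, 8
// and 4 place the byte at bits 8-15, 16-23 and 24-31 respectively.)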
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, 0, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 16), 8)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, 0, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 24), 4)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.imm(@truncate(u8, x), 0)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 8), 12)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 16), 8)).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(.al, reg, reg, Instruction.Operand.imm(@truncate(u8, x >> 24), 4)).toU32());
return;
} else {
return self.fail(src, "ARM registers are 32-bit wide", .{});
}
},
.register => |src_reg| {
// If the registers are the same, nothing to do.
if (src_reg.id() == reg.id())
return;

// mov reg, src_reg
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, Instruction.Operand.reg(src_reg, Instruction.Operand.Shift.none)).toU32());
},
.memory => |addr| {
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(src, reg, .{ .immediate = addr });
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, Instruction.Offset.none).toU32());
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, .{ .offset = Instruction.Offset.none }).toU32());
},
else => return self.fail(src, "TODO implement getSetReg for arm {}", .{mcv}),
},
@@ -2701,6 +2778,55 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
else => return self.fail(src, "TODO implement function parameters for {} on x86_64", .{cc}),
}
},
.arm => {
switch (cc) {
.Naked => {
assert(result.args.len == 0);
result.return_value = .{ .unreach = {} };
result.stack_byte_count = 0;
result.stack_align = 1;
return result;
},
.Unspecified, .C => {
// ARM Procedure Call Standard, Chapter 6.5
var ncrn: usize = 0; // Next Core Register Number
var nsaa: u32 = 0; // Next stacked argument address

for (param_types) |ty, i| {
if (ty.abiAlignment(self.target.*) == 8) {
// Round up NCRN to the next even number
ncrn += ncrn % 2;
}

const param_size = @intCast(u32, ty.abiSize(self.target.*));
if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) {
if (param_size <= 4) {
result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] };
ncrn += 1;
} else {
return self.fail(src, "TODO MCValues with multiple registers", .{});
}
} else if (ncrn < 4 and nsaa == 0) {
return self.fail(src, "TODO MCValues split between registers and stack", .{});
} else {
ncrn = 4;
if (ty.abiAlignment(self.target.*) == 8) {
if (nsaa % 8 != 0) {
nsaa += 8 - (nsaa % 8);
}
}

result.args[i] = .{ .stack_offset = nsaa };
nsaa += param_size;
}
}

result.stack_byte_count = nsaa;
result.stack_align = 4;
},
else => return self.fail(src, "TODO implement function parameters for {} on arm", .{cc}),
}
},
else => if (param_types.len != 0)
return self.fail(src, "TODO implement codegen parameters for {}", .{self.target.cpu.arch}),
}
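A minimal, self-contained sketch (made-up names, same old-style builtins as the rest of this diff) of the NCRN/NSAA split that the `.Unspecified, .C` branch above implements, restricted to word-sized arguments:

```zig
const std = @import("std");
const assert = std.debug.assert;

const ArgLoc = union(enum) {
    core_register: u32, // index into r0-r3
    stack_offset: u32, // byte offset into the stacked-argument area
};

// Word-sized (4-byte, 4-aligned) arguments only: the first four occupy r0-r3,
// the rest are placed on the stack at consecutive 4-byte offsets.
fn classifyWordArgs(comptime n: usize) [n]ArgLoc {
    var result: [n]ArgLoc = undefined;
    var ncrn: u32 = 0; // Next Core Register Number
    var nsaa: u32 = 0; // Next Stacked Argument Address
    var i: usize = 0;
    while (i < n) : (i += 1) {
        if (ncrn < 4) {
            result[i] = ArgLoc{ .core_register = ncrn };
            ncrn += 1;
        } else {
            result[i] = ArgLoc{ .stack_offset = nsaa };
            nsaa += 4;
        }
    }
    return result;
}

test "six word-sized arguments: r0-r3, then the stack" {
    const locs = classifyWordArgs(6);
    assert(locs[3].core_register == 3); // fourth argument lands in r3
    assert(locs[4].stack_offset == 0); // fifth argument is the first stack slot
    assert(locs[5].stack_offset == 4);
}
```

Doubleword-aligned types additionally round NCRN up to an even register number and NSAA up to a multiple of 8, which is what the `abiAlignment == 8` checks above handle.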
@@ -2719,6 +2845,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
else => return self.fail(src, "TODO implement function return values for {}", .{cc}),
},
.arm => switch (cc) {
.Naked => unreachable,
.Unspecified, .C => {
const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
if (ret_ty_size <= 4) {
result.return_value = .{ .register = c_abi_int_return_regs[0] };
} else {
return self.fail(src, "TODO support more return types for ARM backend", .{});
}
},
else => return self.fail(src, "TODO implement function return values for {}", .{cc}),
},
else => return self.fail(src, "TODO implement codegen return values for {}", .{self.target.cpu.arch}),
}
return result;