58 changes: 49 additions & 9 deletions llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -17,6 +17,7 @@
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"

using namespace llvm;

@@ -56,19 +57,38 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
MachineInstrBuilder MIB)
: OutgoingValueHandler(B, MRI), MIB(MIB) {}

MachineInstrBuilder MIB;

: OutgoingValueHandler(B, MRI), MIB(MIB),
Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}
Register getStackAddress(uint64_t MemSize, int64_t Offset,
MachinePointerInfo &MPO,
ISD::ArgFlagsTy Flags) override {
llvm_unreachable("not implemented");
MachineFunction &MF = MIRBuilder.getMF();
LLT p0 = LLT::pointer(0, Subtarget.getXLen());
LLT sXLen = LLT::scalar(Subtarget.getXLen());

if (!SPReg)
SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);

auto OffsetReg = MIRBuilder.buildConstant(sXLen, Offset);

auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

MPO = MachinePointerInfo::getStack(MF, Offset);
return AddrReg.getReg(0);
}

void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
MachinePointerInfo &MPO, CCValAssign &VA) override {
llvm_unreachable("not implemented");
MachineFunction &MF = MIRBuilder.getMF();
uint64_t LocMemOffset = VA.getLocMemOffset();

// TODO: Move StackAlignment to subtarget and share with FrameLowering.
auto MMO =
MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
commonAlignment(Align(16), LocMemOffset));

Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -77,6 +97,14 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
MIRBuilder.buildCopy(PhysReg, ExtReg);
MIB.addUse(PhysReg, RegState::Implicit);
}

private:
MachineInstrBuilder MIB;

// Cache the SP register vreg if we need it more than once in this call site.
Register SPReg;

const RISCVSubtarget &Subtarget;
};

struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
@@ -112,17 +140,26 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {

struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
: IncomingValueHandler(B, MRI) {}
: IncomingValueHandler(B, MRI),
Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}

Register getStackAddress(uint64_t MemSize, int64_t Offset,
MachinePointerInfo &MPO,
ISD::ArgFlagsTy Flags) override {
llvm_unreachable("not implemented");
MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

int FI = MFI.CreateFixedObject(MemSize, Offset, /*Immutable=*/true);
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
return MIRBuilder.buildFrameIndex(LLT::pointer(0, Subtarget.getXLen()), FI)
.getReg(0);
}

void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
MachinePointerInfo &MPO, CCValAssign &VA) override {
llvm_unreachable("not implemented");
MachineFunction &MF = MIRBuilder.getMF();
auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
inferAlignFromPtrInfo(MF, MPO));
MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
}

void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -131,6 +168,9 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
MIRBuilder.getMBB().addLiveIn(PhysReg);
MIRBuilder.buildCopy(ValVReg, PhysReg);
}

private:
const RISCVSubtarget &Subtarget;
};

struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
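The MachineMemOperand alignment in the new outgoing assignValueToAddress comes from commonAlignment(Align(16), LocMemOffset): the largest power-of-two alignment satisfied by both the hard-coded 16-byte stack alignment and the slot offset. Below is a minimal standalone sketch of that computation, added for illustration only (checkStackStoreAlignments is a hypothetical helper, not part of the patch; Align and commonAlignment come from llvm/Support/Alignment.h):

#include "llvm/Support/Alignment.h"
#include <cassert>

// Reproduces the alignments attached to the outgoing stack stores: the slot at
// offset 0 keeps the full 16-byte stack alignment, while slots at offsets 4
// and 8 only get their natural 4- and 8-byte alignment.
void checkStackStoreAlignments() {
  using llvm::Align;
  using llvm::commonAlignment;
  assert(commonAlignment(Align(16), /*Offset=*/0) == Align(16));
  assert(commonAlignment(Align(16), /*Offset=*/4) == Align(4));
  assert(commonAlignment(Align(16), /*Offset=*/8) == Align(8));
}

This is what produces the "store (s32) into stack, align 16" operand at offset 0 and the natural 4-byte alignment at offset 4 in the caller_many_scalars test below.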
@@ -0,0 +1,259 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv32 \
; RUN: -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi ilp32f \
; RUN: -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d \
; RUN: -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s

; This file contains tests that should have identical output for the ilp32,
; ilp32f, and ilp32d ABIs. i.e. where no arguments are passed according to
; the floating point ABI.

; Check that on RV32, i64 is passed in a pair of registers. Unlike
; the convention for varargs, this need not be an aligned pair.

define i32 @callee_i64_in_regs(i32 %a, i64 %b) nounwind {
; RV32I-LABEL: name: callee_i64_in_regs
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11, $x12
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32I-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s64)
; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[TRUNC]]
; RV32I-NEXT: $x10 = COPY [[ADD]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
%b_trunc = trunc i64 %b to i32
%1 = add i32 %a, %b_trunc
ret i32 %1
}

define i32 @caller_i64_in_regs() nounwind {
; RV32I-LABEL: name: caller_i64_in_regs
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; RV32I-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
; RV32I-NEXT: $x10 = COPY [[C]](s32)
; RV32I-NEXT: $x11 = COPY [[UV]](s32)
; RV32I-NEXT: $x12 = COPY [[UV1]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_i64_in_regs, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: $x10 = COPY [[COPY]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
%1 = call i32 @callee_i64_in_regs(i32 1, i64 2)
ret i32 %1
}

; Check that the stack is used once the GPRs are exhausted

define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i64 %g, i32 %h) nounwind {
; RV32I-LABEL: name: callee_many_scalars
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32I-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32I-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY3]](s32), [[COPY4]](s32)
; RV32I-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32I-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32I-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.1, align 16)
; RV32I-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY7]](s32), [[LOAD]](s32)
; RV32I-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV32I-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %fixed-stack.0)
; RV32I-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
; RV32I-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s16)
; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
; RV32I-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[COPY2]]
; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[MV]](s64), [[MV1]]
; RV32I-NEXT: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
; RV32I-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ADD1]]
; RV32I-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[COPY5]]
; RV32I-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[COPY6]]
; RV32I-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[LOAD1]]
; RV32I-NEXT: $x10 = COPY [[ADD5]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
%a_ext = zext i8 %a to i32
%b_ext = zext i16 %b to i32
%1 = add i32 %a_ext, %b_ext
%2 = add i32 %1, %c
%3 = icmp eq i64 %d, %g
%4 = zext i1 %3 to i32
%5 = add i32 %4, %2
%6 = add i32 %5, %e
%7 = add i32 %6, %f
%8 = add i32 %7, %h
ret i32 %8
}

define i32 @caller_many_scalars() nounwind {
; RV32I-LABEL: name: caller_many_scalars
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; RV32I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; RV32I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
; RV32I-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
; RV32I-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C3]](s64)
; RV32I-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C6]](s64)
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
; RV32I-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s32)
; RV32I-NEXT: G_STORE [[UV3]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
; RV32I-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s32)
; RV32I-NEXT: G_STORE [[C7]](s32), [[PTR_ADD1]](p0) :: (store (s32) into stack + 4)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: $x11 = COPY [[ANYEXT1]](s32)
; RV32I-NEXT: $x12 = COPY [[C2]](s32)
; RV32I-NEXT: $x13 = COPY [[UV]](s32)
; RV32I-NEXT: $x14 = COPY [[UV1]](s32)
; RV32I-NEXT: $x15 = COPY [[C4]](s32)
; RV32I-NEXT: $x16 = COPY [[C5]](s32)
; RV32I-NEXT: $x17 = COPY [[UV2]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: $x10 = COPY [[COPY1]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
%1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i32 5, i32 6, i64 7, i32 8)
ret i32 %1
}

; Check return of 2x xlen scalars

define i64 @callee_small_scalar_ret() nounwind {
; RV32I-LABEL: name: callee_small_scalar_ret
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1234567898765
; RV32I-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; RV32I-NEXT: $x10 = COPY [[UV]](s32)
; RV32I-NEXT: $x11 = COPY [[UV1]](s32)
; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
ret i64 1234567898765
}

define i32 @caller_small_scalar_ret() nounwind {
; RV32I-LABEL: name: caller_small_scalar_ret
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 987654321234567
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, implicit-def $x1, implicit-def $x10, implicit-def $x11
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32I-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s64), [[MV]]
; RV32I-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
; RV32I-NEXT: $x10 = COPY [[ZEXT]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
%1 = call i64 @callee_small_scalar_ret()
%2 = icmp eq i64 987654321234567, %1
%3 = zext i1 %2 to i32
ret i32 %3
}

; Check return of 2x xlen structs

%struct.small = type { i32, ptr }

define %struct.small @callee_small_struct_ret() nounwind {
; RV32I-LABEL: name: callee_small_struct_ret
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
; RV32I-NEXT: $x10 = COPY [[C]](s32)
; RV32I-NEXT: $x11 = COPY [[C1]](p0)
; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
ret %struct.small { i32 1, ptr null }
}

define i32 @caller_small_struct_ret() nounwind {
; RV32I-LABEL: name: caller_small_struct_ret
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, implicit-def $x1, implicit-def $x10, implicit-def $x11
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32I-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p0)
; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[PTRTOINT]]
; RV32I-NEXT: $x10 = COPY [[ADD]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
%1 = call %struct.small @callee_small_struct_ret()
%2 = extractvalue %struct.small %1, 0
%3 = extractvalue %struct.small %1, 1
%4 = ptrtoint ptr %3 to i32
%5 = add i32 %2, %4
ret i32 %5
}

; Check return of >2x xlen structs

%struct.large = type { i32, i32, i32, i32 }

define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) nounwind {
; RV32I-LABEL: name: callee_large_struct_ret
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; RV32I-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32I-NEXT: G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32) into %ir.agg.result)
; RV32I-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s32)
; RV32I-NEXT: G_STORE [[C1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.b)
; RV32I-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32I-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s32)
; RV32I-NEXT: G_STORE [[C2]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %ir.c)
; RV32I-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; RV32I-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C6]](s32)
; RV32I-NEXT: G_STORE [[C3]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %ir.d)
; RV32I-NEXT: PseudoRET
store i32 1, ptr %agg.result, align 4
%b = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 1
store i32 2, ptr %b, align 4
%c = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 2
store i32 3, ptr %c, align 4
%d = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 3
store i32 4, ptr %d, align 4
ret void
}

define i32 @caller_large_struct_ret() nounwind {
; RV32I-LABEL: name: caller_large_struct_ret
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; RV32I-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, implicit-def $x1, implicit $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32I-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s32) from %ir.3)
; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]]
; RV32I-NEXT: $x10 = COPY [[ADD]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
%1 = alloca %struct.large
call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
%2 = load i32, ptr %1
%3 = getelementptr inbounds %struct.large, ptr %1, i32 0, i32 3
%4 = load i32, ptr %3
%5 = add i32 %2, %4
ret i32 %5
}
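The stack traffic checked in callee_many_scalars/caller_many_scalars above follows directly from the ilp32 rules this patch exercises: arguments are split into XLEN-sized words, the first eight words land in x10-x17, and the remaining words spill to the stack at increasing 4-byte offsets, with i64 halves not forced into an aligned register pair. A self-contained model of that assignment, written for this review under those assumptions (assignWordsILP32 is hypothetical illustration code, not part of the patch):

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Greedy word-by-word assignment: eight GPR argument registers, then the stack.
std::vector<std::string> assignWordsILP32(const std::vector<unsigned> &ArgBits) {
  static const char *GPRs[] = {"x10", "x11", "x12", "x13",
                               "x14", "x15", "x16", "x17"};
  std::vector<std::string> Locs;
  unsigned NextReg = 0;
  int64_t NextStackOff = 0;
  for (unsigned Bits : ArgBits) {
    unsigned Words = (Bits + 31) / 32; // i8/i16/i32 -> 1 word, i64 -> 2 words
    for (unsigned W = 0; W < Words; ++W) {
      if (NextReg < 8) {
        Locs.push_back(GPRs[NextReg++]);
      } else {
        Locs.push_back("sp+" + std::to_string(NextStackOff));
        NextStackOff += 4;
      }
    }
  }
  return Locs;
}

int main() {
  // callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i64 %g, i32 %h)
  auto Locs = assignWordsILP32({8, 16, 32, 64, 32, 32, 64, 32});
  assert(Locs.size() == 10);
  assert(Locs[6] == "x16" && Locs[7] == "x17"); // %f and the low half of %g
  assert(Locs[8] == "sp+0");                    // high half of %g: the align-16 store
  assert(Locs[9] == "sp+4");                    // %h: the "store ... into stack + 4"
  return 0;
}

This matches the register copies into $x10-$x17 and the two G_STORE instructions through the $x2 (sp) copy in the caller_many_scalars MIR above.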