Skip to content

Commit 8236481

Browse files
committed
Add LoopVectoriser support for llvm.vector.partial.reduce.fadd
1 parent 98e35df commit 8236481

File tree

6 files changed

+199
-12
lines changed

6 files changed

+199
-12
lines changed

llvm/include/llvm/Analysis/TargetTransformInfo.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,12 @@ class TargetTransformInfoImplBase;
222222
/// for IR-level transformations.
223223
class TargetTransformInfo {
224224
public:
225-
enum PartialReductionExtendKind { PR_None, PR_SignExtend, PR_ZeroExtend };
225+
enum PartialReductionExtendKind {
226+
PR_None,
227+
PR_SignExtend,
228+
PR_ZeroExtend,
229+
PR_FPExtend
230+
};
226231

227232
/// Get the kind of extension that an instruction represents.
228233
LLVM_ABI static PartialReductionExtendKind

llvm/lib/Analysis/TargetTransformInfo.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1005,6 +1005,8 @@ TargetTransformInfo::getPartialReductionExtendKind(Instruction *I) {
10051005
return PR_SignExtend;
10061006
if (isa<ZExtInst>(I))
10071007
return PR_ZeroExtend;
1008+
if (isa<FPExtInst>(I))
1009+
return PR_FPExtend;
10081010
return PR_None;
10091011
}
10101012

llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5640,7 +5640,8 @@ InstructionCost AArch64TTIImpl::getPartialReductionCost(
56405640
(!ST->isNeonAvailable() || !ST->hasDotProd()))
56415641
return Invalid;
56425642

5643-
if ((Opcode != Instruction::Add && Opcode != Instruction::Sub) ||
5643+
if ((Opcode != Instruction::Add && Opcode != Instruction::Sub &&
5644+
Opcode != Instruction::FAdd) ||
56445645
OpAExtend == TTI::PR_None)
56455646
return Invalid;
56465647

@@ -5650,7 +5651,8 @@ InstructionCost AArch64TTIImpl::getPartialReductionCost(
56505651

56515652
// We only support multiply binary operations for now, and for muls we
56525653
// require the types being extended to be the same.
5653-
if (BinOp && (*BinOp != Instruction::Mul || InputTypeA != InputTypeB))
5654+
if (BinOp && ((*BinOp != Instruction::Mul && *BinOp != Instruction::FMul) ||
5655+
InputTypeA != InputTypeB))
56545656
return Invalid;
56555657

56565658
bool IsUSDot = OpBExtend != TTI::PR_None && OpAExtend != OpBExtend;

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5313,14 +5313,16 @@ LoopVectorizationCostModel::getReductionPatternCost(Instruction *I,
53135313
// it is not we return an invalid cost specifying the original cost method
53145314
// should be used.
53155315
Instruction *RetI = I;
5316-
if (match(RetI, m_ZExtOrSExt(m_Value()))) {
5316+
if (match(RetI, m_ZExtOrSExt(m_Value())) || match(RetI, m_FPExt(m_Value()))) {
53175317
if (!RetI->hasOneUser())
53185318
return std::nullopt;
53195319
RetI = RetI->user_back();
53205320
}
53215321

5322-
if (match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5323-
RetI->user_back()->getOpcode() == Instruction::Add) {
5322+
if ((match(RetI, m_OneUse(m_Mul(m_Value(), m_Value()))) &&
5323+
RetI->user_back()->getOpcode() == Instruction::Add) ||
5324+
(match(RetI, m_OneUse(m_FMul(m_Value(), m_Value()))) &&
5325+
RetI->user_back()->getOpcode() == Instruction::FAdd)) {
53245326
RetI = RetI->user_back();
53255327
}
53265328

@@ -7967,7 +7969,8 @@ bool VPRecipeBuilder::getScaledReductions(
79677969
continue;
79687970
}
79697971
Value *ExtOp;
7970-
if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))))
7972+
if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp))) &&
7973+
!match(OpI, m_FPExt(m_Value(ExtOp))))
79717974
return false;
79727975
Exts[I] = cast<Instruction>(OpI);
79737976

@@ -8138,6 +8141,8 @@ VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction,
81388141
return nullptr;
81398142

81408143
unsigned ReductionOpcode = Reduction->getOpcode();
8144+
if (ReductionOpcode == Instruction::FAdd && !Reduction->hasAllowReassoc())
8145+
return nullptr;
81418146
if (ReductionOpcode == Instruction::Sub) {
81428147
auto *const Zero = ConstantInt::get(Reduction->getType(), 0);
81438148
SmallVector<VPValue *, 2> Ops;

llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -322,6 +322,8 @@ VPPartialReductionRecipe::computeCost(ElementCount VF,
322322
return TTI::PR_ZeroExtend;
323323
if (WidenCastR->getOpcode() == Instruction::CastOps::SExt)
324324
return TTI::PR_SignExtend;
325+
if (WidenCastR->getOpcode() == Instruction::CastOps::FPExt)
326+
return TTI::PR_FPExtend;
325327
return TTI::PR_None;
326328
};
327329

@@ -374,18 +376,30 @@ VPPartialReductionRecipe::computeCost(ElementCount VF,
374376
void VPPartialReductionRecipe::execute(VPTransformState &State) {
375377
auto &Builder = State.Builder;
376378

377-
assert(getOpcode() == Instruction::Add &&
378-
"Unhandled partial reduction opcode");
379+
assert(
380+
(getOpcode() == Instruction::Add || getOpcode() == Instruction::FAdd) &&
381+
"Unhandled partial reduction opcode");
379382

380383
Value *BinOpVal = State.get(getOperand(1));
381384
Value *PhiVal = State.get(getOperand(0));
382385
assert(PhiVal && BinOpVal && "Phi and Mul must be set");
383386

384387
Type *RetTy = PhiVal->getType();
385388

386-
CallInst *V =
387-
Builder.CreateIntrinsic(RetTy, Intrinsic::vector_partial_reduce_add,
388-
{PhiVal, BinOpVal}, nullptr, "partial.reduce");
389+
enum llvm::Intrinsic::IndependentIntrinsics PRIntrinsic;
390+
switch (getOpcode()) {
391+
case Instruction::Add: {
392+
PRIntrinsic = Intrinsic::vector_partial_reduce_add;
393+
break;
394+
}
395+
case Instruction::FAdd: {
396+
PRIntrinsic = Intrinsic::vector_partial_reduce_fadd;
397+
break;
398+
}
399+
}
400+
401+
CallInst *V = Builder.CreateIntrinsic(RetTy, PRIntrinsic, {PhiVal, BinOpVal},
402+
nullptr, "partial.reduce");
389403

390404
State.set(this, V);
391405
}
Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,159 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
2+
; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -enable-epilogue-vectorization=false -mattr=+sve2p1,+dotprod -S < %s | FileCheck %s --check-prefixes=CHECK-INTERLEAVE1
3+
; RUN: opt -passes=loop-vectorize -enable-epilogue-vectorization=false -mattr=+sve2p1,+dotprod -S < %s | FileCheck %s --check-prefixes=CHECK-INTERLEAVED
4+
; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -vectorizer-maximize-bandwidth -enable-epilogue-vectorization=false -mattr=+sve2p1,+dotprod -S < %s | FileCheck %s --check-prefixes=CHECK-MAXBW
5+
6+
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
7+
target triple = "aarch64-none-unknown-elf"
8+
9+
define float @fdotp(ptr %a, ptr %b) #0 {
10+
; CHECK-INTERLEAVE1-LABEL: define float @fdotp(
11+
; CHECK-INTERLEAVE1-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
12+
; CHECK-INTERLEAVE1-NEXT: [[ENTRY:.*:]]
13+
; CHECK-INTERLEAVE1-NEXT: br label %[[VECTOR_PH:.*]]
14+
; CHECK-INTERLEAVE1: [[VECTOR_PH]]:
15+
; CHECK-INTERLEAVE1-NEXT: br label %[[VECTOR_BODY:.*]]
16+
; CHECK-INTERLEAVE1: [[VECTOR_BODY]]:
17+
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
18+
; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ <float 0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
19+
; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr half, ptr [[A]], i64 [[INDEX]]
20+
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[TMP0]], align 1
21+
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = fpext <8 x half> [[WIDE_LOAD]] to <8 x float>
22+
; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = getelementptr half, ptr [[B]], i64 [[INDEX]]
23+
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x half>, ptr [[TMP2]], align 1
24+
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = fpext <8 x half> [[WIDE_LOAD1]] to <8 x float>
25+
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = fmul <8 x float> [[TMP3]], [[TMP1]]
26+
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x float> @llvm.vector.partial.reduce.fadd.v4f32.v8f32(<4 x float> [[VEC_PHI]], <8 x float> [[TMP4]])
27+
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
28+
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
29+
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
30+
; CHECK-INTERLEAVE1: [[MIDDLE_BLOCK]]:
31+
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call reassoc float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PARTIAL_REDUCE]])
32+
; CHECK-INTERLEAVE1-NEXT: br label %[[FOR_EXIT:.*]]
33+
; CHECK-INTERLEAVE1: [[FOR_EXIT]]:
34+
; CHECK-INTERLEAVE1-NEXT: ret float [[TMP7]]
35+
;
36+
; CHECK-INTERLEAVED-LABEL: define float @fdotp(
37+
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
38+
; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*:]]
39+
; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_PH:.*]]
40+
; CHECK-INTERLEAVED: [[VECTOR_PH]]:
41+
; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_BODY:.*]]
42+
; CHECK-INTERLEAVED: [[VECTOR_BODY]]:
43+
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
44+
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ <float 0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
45+
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x float> [ splat (float -0.000000e+00), %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], %[[VECTOR_BODY]] ]
46+
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr half, ptr [[A]], i64 [[INDEX]]
47+
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr half, ptr [[TMP0]], i32 8
48+
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[TMP0]], align 1
49+
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x half>, ptr [[TMP1]], align 1
50+
; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = fpext <8 x half> [[WIDE_LOAD]] to <8 x float>
51+
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = fpext <8 x half> [[WIDE_LOAD2]] to <8 x float>
52+
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr half, ptr [[B]], i64 [[INDEX]]
53+
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr half, ptr [[TMP4]], i32 8
54+
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x half>, ptr [[TMP4]], align 1
55+
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x half>, ptr [[TMP5]], align 1
56+
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = fpext <8 x half> [[WIDE_LOAD3]] to <8 x float>
57+
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = fpext <8 x half> [[WIDE_LOAD4]] to <8 x float>
58+
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = fmul <8 x float> [[TMP6]], [[TMP2]]
59+
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = fmul <8 x float> [[TMP7]], [[TMP3]]
60+
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x float> @llvm.vector.partial.reduce.fadd.v4f32.v8f32(<4 x float> [[VEC_PHI]], <8 x float> [[TMP8]])
61+
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x float> @llvm.vector.partial.reduce.fadd.v4f32.v8f32(<4 x float> [[VEC_PHI1]], <8 x float> [[TMP9]])
62+
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
63+
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
64+
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
65+
; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]:
66+
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <4 x float> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
67+
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call reassoc float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[BIN_RDX]])
68+
; CHECK-INTERLEAVED-NEXT: br label %[[FOR_EXIT:.*]]
69+
; CHECK-INTERLEAVED: [[FOR_EXIT]]:
70+
; CHECK-INTERLEAVED-NEXT: ret float [[TMP11]]
71+
;
72+
; CHECK-MAXBW-LABEL: define float @fdotp(
73+
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
74+
; CHECK-MAXBW-NEXT: [[ENTRY:.*]]:
75+
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
76+
; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP4]], 3
77+
; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
78+
; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
79+
; CHECK-MAXBW: [[VECTOR_PH]]:
80+
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
81+
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP6]], 8
82+
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
83+
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
84+
; CHECK-MAXBW-NEXT: br label %[[VECTOR_BODY:.*]]
85+
; CHECK-MAXBW: [[VECTOR_BODY]]:
86+
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
87+
; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
88+
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = getelementptr half, ptr [[A]], i64 [[INDEX]]
89+
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP0]], align 1
90+
; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = fpext <vscale x 8 x half> [[WIDE_LOAD]] to <vscale x 8 x float>
91+
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = getelementptr half, ptr [[B]], i64 [[INDEX]]
92+
; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 1
93+
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = fpext <vscale x 8 x half> [[WIDE_LOAD1]] to <vscale x 8 x float>
94+
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = fmul <vscale x 8 x float> [[TMP7]], [[TMP5]]
95+
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x float> @llvm.vector.partial.reduce.fadd.nxv4f32.nxv8f32(<vscale x 4 x float> [[VEC_PHI]], <vscale x 8 x float> [[TMP8]])
96+
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
97+
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
98+
; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
99+
; CHECK-MAXBW: [[MIDDLE_BLOCK]]:
100+
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[PARTIAL_REDUCE]])
101+
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
102+
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label %[[FOR_EXIT:.*]], label %[[SCALAR_PH]]
103+
; CHECK-MAXBW: [[SCALAR_PH]]:
104+
; CHECK-MAXBW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
105+
; CHECK-MAXBW-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
106+
; CHECK-MAXBW-NEXT: br label %[[FOR_BODY:.*]]
107+
; CHECK-MAXBW: [[FOR_BODY]]:
108+
; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
109+
; CHECK-MAXBW-NEXT: [[ACCUM:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
110+
; CHECK-MAXBW-NEXT: [[GEP_A:%.*]] = getelementptr half, ptr [[A]], i64 [[IV]]
111+
; CHECK-MAXBW-NEXT: [[LOAD_A:%.*]] = load half, ptr [[GEP_A]], align 1
112+
; CHECK-MAXBW-NEXT: [[EXT_A:%.*]] = fpext half [[LOAD_A]] to float
113+
; CHECK-MAXBW-NEXT: [[GEP_B:%.*]] = getelementptr half, ptr [[B]], i64 [[IV]]
114+
; CHECK-MAXBW-NEXT: [[LOAD_B:%.*]] = load half, ptr [[GEP_B]], align 1
115+
; CHECK-MAXBW-NEXT: [[EXT_B:%.*]] = fpext half [[LOAD_B]] to float
116+
; CHECK-MAXBW-NEXT: [[MUL:%.*]] = fmul float [[EXT_B]], [[EXT_A]]
117+
; CHECK-MAXBW-NEXT: [[ADD]] = fadd reassoc float [[MUL]], [[ACCUM]]
118+
; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
119+
; CHECK-MAXBW-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
120+
; CHECK-MAXBW-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
121+
; CHECK-MAXBW: [[FOR_EXIT]]:
122+
; CHECK-MAXBW-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
123+
; CHECK-MAXBW-NEXT: ret float [[ADD_LCSSA]]
124+
;
125+
entry:
126+
br label %for.body
127+
128+
for.body: ; preds = %for.body, %entry
129+
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
130+
%accum = phi float [ 0.0, %entry ], [ %add, %for.body ]
131+
%gep.a = getelementptr half, ptr %a, i64 %iv
132+
%load.a = load half, ptr %gep.a, align 1
133+
%ext.a = fpext half %load.a to float
134+
%gep.b = getelementptr half, ptr %b, i64 %iv
135+
%load.b = load half, ptr %gep.b, align 1
136+
%ext.b = fpext half %load.b to float
137+
%mul = fmul float %ext.b, %ext.a
138+
%add = fadd reassoc float %mul, %accum
139+
%iv.next = add i64 %iv, 1
140+
%exitcond.not = icmp eq i64 %iv.next, 1024
141+
br i1 %exitcond.not, label %for.exit, label %for.body
142+
143+
for.exit: ; preds = %for.body
144+
ret float %add
145+
}
146+
;.
147+
; CHECK-INTERLEAVE1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
148+
; CHECK-INTERLEAVE1: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
149+
; CHECK-INTERLEAVE1: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
150+
;.
151+
; CHECK-INTERLEAVED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
152+
; CHECK-INTERLEAVED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
153+
; CHECK-INTERLEAVED: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
154+
;.
155+
; CHECK-MAXBW: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
156+
; CHECK-MAXBW: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
157+
; CHECK-MAXBW: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
158+
; CHECK-MAXBW: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
159+
;.

0 commit comments

Comments
 (0)