diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 931a5b7582c4e..343df72dfe7ba 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -70,6 +70,7 @@ bool VPRecipeBase::mayWriteToMemory() const { return cast<VPWidenIntrinsicRecipe>(this)->mayWriteToMemory(); case VPCanonicalIVPHISC: case VPBranchOnMaskSC: + case VPDerivedIVSC: case VPFirstOrderRecurrencePHISC: case VPReductionPHISC: case VPScalarIVStepsSC: @@ -86,6 +87,7 @@ bool VPRecipeBase::mayWriteToMemory() const { case VPWidenLoadEVLSC: case VPWidenLoadSC: case VPWidenPHISC: + case VPWidenPointerInductionSC: case VPWidenSC: case VPWidenSelectSC: { const Instruction *I = @@ -119,6 +121,7 @@ bool VPRecipeBase::mayReadFromMemory() const { case VPWidenIntrinsicSC: return cast<VPWidenIntrinsicRecipe>(this)->mayReadFromMemory(); case VPBranchOnMaskSC: + case VPDerivedIVSC: case VPFirstOrderRecurrencePHISC: case VPPredInstPHISC: case VPScalarIVStepsSC: @@ -134,6 +137,7 @@ bool VPRecipeBase::mayReadFromMemory() const { case VPWidenGEPSC: case VPWidenIntOrFpInductionSC: case VPWidenPHISC: + case VPWidenPointerInductionSC: case VPWidenSC: case VPWidenSelectSC: { const Instruction *I = diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 84817d78a077a..f38193cf11cb8 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -130,6 +130,24 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( return true; } +/// Return true if we do not know how to (mechanically) hoist or sink \p R out +/// of a loop region. +static bool cannotHoistOrSinkRecipe(const VPRecipeBase &R) { + // Assumes don't alias anything or throw; as long as they're guaranteed to + // execute, they're safe to hoist. + if (match(&R, m_Intrinsic<Intrinsic::assume>())) + return false; + + // TODO: Relax checks in the future, e.g. 
we could also hoist reads, if their + // memory location is not modified in the vector loop. + if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi()) + return true; + + // Allocas cannot be hoisted. + auto *RepR = dyn_cast<VPReplicateRecipe>(&R); + return RepR && RepR->getOpcode() == Instruction::Alloca; +} + static bool sinkScalarOperands(VPlan &Plan) { auto Iter = vp_depth_first_deep(Plan.getEntry()); bool Changed = false; @@ -1825,7 +1843,7 @@ sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR, VPDT.properlyDominates(Previous, SinkCandidate)) return true; - if (SinkCandidate->mayHaveSideEffects()) + if (cannotHoistOrSinkRecipe(*SinkCandidate)) return false; WorkList.push_back(SinkCandidate); @@ -1865,7 +1883,7 @@ sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR, static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR, VPRecipeBase *Previous, VPDominatorTree &VPDT) { - if (Previous->mayHaveSideEffects() || Previous->mayReadFromMemory()) + if (cannotHoistOrSinkRecipe(*Previous)) return false; // Collect recipes that need hoisting. @@ -1912,11 +1930,6 @@ static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR, return nullptr; return HoistCandidate; }; - auto CanHoist = [&](VPRecipeBase *HoistCandidate) { - // Avoid hoisting candidates with side-effects, as we do not yet analyze - // associated dependencies. 
- return !HoistCandidate->mayHaveSideEffects(); - }; if (!NeedsHoisting(Previous->getVPSingleValue())) return true; @@ -1928,7 +1941,7 @@ static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR, VPRecipeBase *Current = HoistCandidates[I]; assert(Current->getNumDefinedValues() == 1 && "only recipes with a single defined value expected"); - if (!CanHoist(Current)) + if (cannotHoistOrSinkRecipe(*Current)) return false; for (VPValue *Op : Current->operands()) { @@ -2143,24 +2156,6 @@ void VPlanTransforms::cse(VPlan &Plan) { static void licm(VPlan &Plan) { VPBasicBlock *Preheader = Plan.getVectorPreheader(); - // Return true if we do not know how to (mechanically) hoist a given recipe - // out of a loop region. - auto CannotHoistRecipe = [](VPRecipeBase &R) { - // Assumes don't alias anything or throw; as long as they're guaranteed to - // execute, they're safe to hoist. - if (match(&R, m_Intrinsic<Intrinsic::assume>())) - return false; - - // TODO: Relax checks in the future, e.g. we could also hoist reads, if - // their memory location is not modified in the vector loop. - if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi()) - return true; - - // Allocas cannot be hoisted. - auto *RepR = dyn_cast<VPReplicateRecipe>(&R); - return RepR && RepR->getOpcode() == Instruction::Alloca; - }; - // Hoist any loop invariant recipes from the vector loop region to the // preheader. Preform a shallow traversal of the vector loop region, to // exclude recipes in replicate regions. 
Since the top-level blocks in the @@ -2172,7 +2167,7 @@ static void licm(VPlan &Plan) { for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>( vp_depth_first_shallow(LoopRegion->getEntry()))) { for (VPRecipeBase &R : make_early_inc_range(*VPBB)) { - if (CannotHoistRecipe(R)) + if (cannotHoistOrSinkRecipe(R)) continue; if (any_of(R.operands(), [](VPValue *Op) { return !Op->isDefinedOutsideLoopRegions(); diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll index 74129806ad6fb..12d73a3ffe27e 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll @@ -425,3 +425,79 @@ loop: exit: ret void } + +define void @hoist_previous_value_and_operand_load(ptr %dst) { +; CHECK-LABEL: @hoist_previous_value_and_operand_load( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[FOR_1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[LOAD:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[FOR_2:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[OR:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[OR]] = or i32 [[FOR_1]], 3 +; CHECK-NEXT: [[ADD]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[IV]] +; CHECK-NEXT: store i32 [[FOR_2]], ptr [[GEP]], align 4 +; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 337 +; CHECK-NEXT: [[LOAD]] = load i32, ptr [[DST]], align 4 +; CHECK-NEXT: br i1 [[ICMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 1, %entry ], [ %add, %loop ] + %for.1 = phi i32 [ 1, %entry ], [ %load, %loop ] + %for.2 = phi i32 [ 0, %entry ], [ %or, %loop ] + %or = or i32 %for.1, 3 + %add = add i64 %iv, 1 + %gep = getelementptr 
inbounds i32, ptr %dst, i64 %iv + store i32 %for.2, ptr %gep + %icmp = icmp ult i64 %iv, 337 + %load = load i32, ptr %dst + br i1 %icmp, label %loop, label %exit + +exit: + ret void +} + +define void @hoist_previous_value_and_operand_assume(ptr %dst) { +; CHECK-LABEL: @hoist_previous_value_and_operand_assume( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[FOR_1:%.*]] = phi i1 [ true, [[ENTRY]] ], [ [[TRUNC:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[FOR_2:%.*]] = phi i1 [ false, [[ENTRY]] ], [ [[OR:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[OR]] = or i1 [[FOR_1]], true +; CHECK-NEXT: [[ADD]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[IV]] +; CHECK-NEXT: store i1 [[FOR_2]], ptr [[GEP]], align 1 +; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 337 +; CHECK-NEXT: call void @llvm.assume(i1 [[FOR_1]]) +; CHECK-NEXT: [[TRUNC]] = trunc i64 [[IV]] to i1 +; CHECK-NEXT: br i1 [[ICMP]], label [[LOOP]], label [[EXIT:%.*]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 1, %entry ], [ %add, %loop ] + %for.1 = phi i1 [ 1, %entry ], [ %trunc, %loop ] + %for.2 = phi i1 [ 0, %entry ], [ %or, %loop ] + %or = or i1 %for.1, true + %add = add i64 %iv, 1 + %gep = getelementptr inbounds i32, ptr %dst, i64 %iv + store i1 %for.2, ptr %gep + %icmp = icmp ult i64 %iv, 337 + call void @llvm.assume(i1 %for.1) + %trunc = trunc i64 %iv to i1 + br i1 %icmp, label %loop, label %exit + +exit: + ret void +}