[GVN] Teach GVN simple masked load/store forwarding #157689
Conversation
This patch teaches GVN how to eliminate redundant masked loads and forward previously loaded or stored values, inserting a select where needed. This is possible when the same mask is used for masked stores/loads that access the same memory location.
@llvm/pr-subscribers-llvm-ir @llvm/pr-subscribers-llvm-transforms

Author: Matthew Devereau (MDevereau)

Changes

This patch teaches GVN how to eliminate redundant masked loads and forward previously loaded or stored values, inserting a select where needed. This is possible when the same mask is used for masked stores/loads that access the same memory location.

Full diff: https://github.com/llvm/llvm-project/pull/157689.diff

3 Files Affected:
diff --git a/llvm/include/llvm/Transforms/Scalar/GVN.h b/llvm/include/llvm/Transforms/Scalar/GVN.h
index 245414935bc0f..74a4d6ce00fcc 100644
--- a/llvm/include/llvm/Transforms/Scalar/GVN.h
+++ b/llvm/include/llvm/Transforms/Scalar/GVN.h
@@ -56,6 +56,7 @@ class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class Value;
+class IntrinsicInst;
/// A private "module" namespace for types and utilities used by GVN. These
/// are implementation details and should not be used by clients.
namespace LLVM_LIBRARY_VISIBILITY_NAMESPACE gvn {
@@ -349,6 +350,7 @@ class GVNPass : public PassInfoMixin<GVNPass> {
// Helper functions of redundant load elimination.
bool processLoad(LoadInst *L);
+ bool processMaskedLoad(IntrinsicInst *I);
bool processNonLocalLoad(LoadInst *L);
bool processAssumeIntrinsic(AssumeInst *II);
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 26e17cc849bff..10325ab7c5737 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -50,6 +50,7 @@
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
@@ -2287,6 +2288,50 @@ bool GVNPass::processLoad(LoadInst *L) {
return true;
}
+// Attempt to process masked loads which have loaded from
+// masked stores with the same mask
+bool GVNPass::processMaskedLoad(IntrinsicInst *I) {
+ Value *Mask = I->getOperand(2);
+ Value *Passthrough = I->getOperand(3);
+
+ MemDepResult Dep = MD->getDependency(I);
+ Instruction *DepInst = Dep.getInst();
+ if (!DepInst || !Dep.isLocal())
+ return false;
+
+ auto *MaskedStore = dyn_cast<IntrinsicInst>(DepInst);
+ if (!MaskedStore || MaskedStore->getIntrinsicID() != Intrinsic::masked_store)
+ return false;
+
+ auto StoreMask = MaskedStore->getOperand(3);
+ if (StoreMask != Mask)
+ return false;
+
+ Value *OpToForward =
+ AvailableValue::get(MaskedStore->getOperand(0)).getSimpleValue();
+ if (auto *LoadToForward = dyn_cast<IntrinsicInst>(OpToForward);
+ LoadToForward &&
+ LoadToForward->getIntrinsicID() == Intrinsic::masked_load) {
+ // For MaskedLoad->MaskedStore->MaskedLoad, the mask must be the same for
+ // all three instructions. The Passthrough on the two loads must also be the
+ // same.
+ if (LoadToForward->getOperand(2) != Mask ||
+ LoadToForward->getOperand(3) != Passthrough)
+ return false;
+ } else {
+ // MaskedStore(Op, ptr, mask)->MaskedLoad(ptr, mask, passthrough) can be
+ // replaced with MaskedStore(Op, ptr, mask)->select(mask, Op, passthrough)
+ IRBuilder<> Builder(I);
+ OpToForward = Builder.CreateSelect(StoreMask, OpToForward, Passthrough);
+ }
+
+ I->replaceAllUsesWith(OpToForward);
+ ICF->removeUsersOf(I);
+ salvageAndRemoveInstruction(I);
+ ++NumGVNLoad;
+ return true;
+}
+
/// Return a pair the first field showing the value number of \p Exp and the
/// second field showing whether it is a value number newly created.
std::pair<uint32_t, bool>
@@ -2734,6 +2779,11 @@ bool GVNPass::processInstruction(Instruction *I) {
return false;
}
+ if (auto *II = dyn_cast<IntrinsicInst>(I))
+ if (II && II->getIntrinsicID() == Intrinsic::masked_load)
+ if (processMaskedLoad(II))
+ return true;
+
// For conditional branches, we can perform simple conditional propagation on
// the condition value itself.
if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
diff --git a/llvm/test/Transforms/GVN/masked-load-store.ll b/llvm/test/Transforms/GVN/masked-load-store.ll
index 984a756591701..b32279941d0b0 100644
--- a/llvm/test/Transforms/GVN/masked-load-store.ll
+++ b/llvm/test/Transforms/GVN/masked-load-store.ll
@@ -36,6 +36,164 @@ define <128 x i8> @f1(ptr %a0, <128 x i8> %a1, <128 x i8> %a2) {
ret <128 x i8> %v4
}
+define <4 x float> @forward_masked_load(ptr %0, ptr %1) {
+; CHECK-LABEL: @forward_masked_load(
+; CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer)
+; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <4 x i1> splat (i1 true))
+; CHECK-NEXT: ret <4 x float> [[TMP4]]
+;
+ %6 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 0, i32 4)
+ %7 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %6, <4 x float> zeroinitializer)
+ call void @llvm.masked.store.v4f32.p0(<4 x float> %7, ptr %1, i32 1, <4 x i1> %6)
+ %8 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %6, <4 x float> zeroinitializer)
+ ret <4 x float> %8
+}
+
+define <4 x float> @forward_binop_splat_i1_mask(ptr %0, ptr %1) {
+; CHECK-LABEL: @forward_binop_splat_i1_mask(
+; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer)
+; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer)
+; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]]
+; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> splat (i1 true))
+; CHECK-NEXT: ret <4 x float> [[FMUL]]
+;
+ %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 0, i32 4)
+ %load.0.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer)
+ %gep.0.16 = getelementptr i8, ptr %0, i32 16
+ %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer)
+ %fmul = fmul <4 x float> %load.0.0, %load.0.16
+ call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask)
+ %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer)
+ ret <4 x float> %load.1.0
+}
+
+define <4 x float> @forward_binop_with_sel(ptr %0, ptr %1, i32 %a, i32 %b, <4 x float> %passthrough) {
+; CHECK-LABEL: @forward_binop_with_sel(
+; CHECK-NEXT: [[MASK:%.*]] = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer)
+; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer)
+; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]]
+; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> [[MASK]])
+; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[MASK]], <4 x float> [[FMUL]], <4 x float> [[PASSTHROUGH:%.*]]
+; CHECK-NEXT: ret <4 x float> [[TMP3]]
+;
+ %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %a, i32 %b)
+ %load.0.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer)
+ %gep.0.16 = getelementptr i8, ptr %0, i32 16
+ %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer)
+ %fmul = fmul <4 x float> %load.0.0, %load.0.16
+ call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask)
+ %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> %passthrough)
+ ret <4 x float> %load.1.0
+}
+
+define <vscale x 4 x float> @forward_masked_load_scalable(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) {
+; CHECK-LABEL: @forward_masked_load_scalable(
+; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[PASSTHROUGH:%.*]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]])
+; CHECK-NEXT: ret <vscale x 4 x float> [[TMP4]]
+;
+ %6 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+ %7 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %6, <vscale x 4 x float> %passthrough)
+ call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %7, ptr %1, i32 1, <vscale x 4 x i1> %6)
+ %8 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %6, <vscale x 4 x float> %passthrough)
+ ret <vscale x 4 x float> %8
+}
+
+define <vscale x 4 x float> @bail_on_different_passthrough(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) {
+; CHECK-LABEL: @bail_on_different_passthrough(
+; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[PASSTHROUGH:%.*]])
+; CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]]
+;
+ %6 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+ %7 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %6, <vscale x 4 x float> zeroinitializer)
+ call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %7, ptr %1, i32 1, <vscale x 4 x i1> %6)
+ %8 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %6, <vscale x 4 x float> %passthrough)
+ ret <vscale x 4 x float> %8
+}
+
+define <vscale x 4 x float> @forward_binop_with_sel_scalable(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) {
+; CHECK-LABEL: @forward_binop_with_sel_scalable(
+; CHECK-NEXT: [[MASK:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK]])
+; CHECK-NEXT: [[TMP3:%.*]] = select <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> [[FMUL]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]
+; CHECK-NEXT: ret <vscale x 4 x float> [[TMP3]]
+;
+ %mask = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+ %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer)
+ %gep.0.16 = getelementptr i8, ptr %0, i32 16
+ %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer)
+ %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16
+ call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask)
+ %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough)
+ ret <vscale x 4 x float> %load.1.0
+}
+
+define <vscale x 4 x float> @load_mask_differs(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) {
+; CHECK-LABEL: @load_mask_differs(
+; CHECK-NEXT: [[MASK0:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8)
+; CHECK-NEXT: [[MASK1:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]])
+; CHECK-NEXT: [[LOAD_1_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK1]], <vscale x 4 x float> [[PASSTHROUGH:%.*]])
+; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD_1_0]]
+;
+ %mask0 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8)
+ %mask1 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+ %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer)
+ %gep.0.16 = getelementptr i8, ptr %0, i32 16
+ %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer)
+ %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16
+ call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask0)
+ %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask1, <vscale x 4 x float> %passthrough)
+ ret <vscale x 4 x float> %load.1.0
+}
+
+define <vscale x 4 x float> @store_mask_differs(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) {
+; CHECK-LABEL: @store_mask_differs(
+; CHECK-NEXT: [[MASK0:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8)
+; CHECK-NEXT: [[MASK1:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK1]])
+; CHECK-NEXT: [[LOAD_1_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> [[PASSTHROUGH:%.*]])
+; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD_1_0]]
+;
+ %mask0 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8)
+ %mask1 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4)
+ %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer)
+ %gep.0.16 = getelementptr i8, ptr %0, i32 16
+ %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer)
+ %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16
+ call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask1)
+ %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> %passthrough)
+ ret <vscale x 4 x float> %load.1.0
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: read)
+declare <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr captures(none), i32 immarg, <vscale x 4 x i1>, <vscale x 4 x float>) #1
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: write)
+declare void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float>, ptr captures(none), i32 immarg, <vscale x 4 x i1>) #2
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32) #3
+
declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32, <128 x i1>, <128 x i8>)
declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32, <128 x i1>)
The following tests are failing: LLVM.CodeGen/AMDGPU/fcopysign.gfx11plus.ll
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
    return false;
  }

  if (auto *II = dyn_cast<IntrinsicInst>(I))
I think you can simplify this a little by doing something like:
if (match(I, m_Intrinsic<Intrinsic::masked_load>()) && processMaskedLoad(cast<IntrinsicInst>(I)))
return true;
Done.
  Value *Mask = I->getOperand(2);
  Value *Passthrough = I->getOperand(3);

  MemDepResult Dep = MD->getDependency(I);
Looking at processLoad it seems like you need an additional check:
if (!MD)
return false;
Would be good to add a test for this too.
I've added the test masked-load-store-no-mem-dep.ll to deal with this.
          m_Intrinsic<Intrinsic::masked_store>(m_Value(StoreVal), m_Value(),
                                               m_Value(), m_Specific(Mask))))
    return false;
|
|
If a dependency is local then it's either a Clobber or a Def, and it looks like the code below is assuming a Def. Would be good to clarify this. If so, I think you have to bail out for the clobber case as that's more complicated - see AnalyzeLoadAvailability.
I added a bail-out if it's not a Def
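A minimal sketch of that bail-out, assuming it sits next to the existing dependency check in processMaskedLoad (exact placement and naming may differ from the final patch):

MemDepResult Dep = MD->getDependency(I);
Instruction *DepInst = Dep.getInst();
// Clobbers (and non-local dependencies) need the heavier handling in
// AnalyzeLoadAvailability, so only a local Def is forwarded here.
if (!DepInst || !Dep.isLocal() || !Dep.isDef())
  return false;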
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
|
|
  Value *StoreVal;
  if (!match(DepInst,
             m_Intrinsic<Intrinsic::masked_store>(m_Value(StoreVal), m_Value(),
Might be worth adding an equivalent m_MaskedStore to llvm/IR/PatternMatch.h and using that instead?
Done. Though, I wonder how many intrinsics warrant a specific match before it becomes a bit of a code bloat.
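One plausible shape for the new matcher, mirroring the existing m_MaskedLoad helper in llvm/IR/PatternMatch.h (a sketch only; the committed helper may differ):

// Matches llvm.masked.store(Val, Ptr, Alignment, Mask).
template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty
m_MaskedStore(const Opnd0 &Val, const Opnd1 &Ptr, const Opnd2 &Alignment,
              const Opnd3 &Mask) {
  return m_Intrinsic<Intrinsic::masked_store>(Val, Ptr, Alignment, Mask);
}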
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
  // For MaskedLoad->MaskedStore->MaskedLoad, the mask must be the same for
  // all three instructions. The Passthrough on the two loads must also be the
  // same.
  OpToForward = AvailableValue::get(StoreVal).getSimpleValue();
I don't think there is any value in calling AvailableValue::get(StoreVal).getSimpleValue() because that's identical to just doing:
OpToForward = StoreVal;
I've removed the AvailableValue call.
  }

  I->replaceAllUsesWith(OpToForward);
  ICF->removeUsersOf(I);
I don't know if ordering matters, but I noticed that in processLoad we do this the other way around:
ICF->removeUsersOf(L);
L->replaceAllUsesWith(AvailableValue);
Done.
|
|
  I->replaceAllUsesWith(OpToForward);
  ICF->removeUsersOf(I);
  salvageAndRemoveInstruction(I);
Do we also need to call
if (MSSAU)
MSSAU->removeMemoryAccess(L);
and
if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
MD->invalidateCachedPointerInfo(AvailableValue);
similar to processLoads?
if (MSSAU)
  MSSAU->removeMemoryAccess(L);

This is indirectly called by

salvageAndRemoveInstruction(I)
// ...
removeInstruction(I)

I've added the Pointer stuff though.
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
  // all three instructions. The Passthrough on the two loads must also be the
  // same.
  OpToForward = AvailableValue::get(StoreVal).getSimpleValue();
  else if (match(StoreVal, m_Intrinsic<Intrinsic::masked_load>()))
I think if you're hitting masked loads here it might be because you're permitting "clobbers". What happens if you bail out for clobbers as I mentioned above?
The test @bail_on_different_passthrough I added to masked-load-store.ll removes a load when excluding clobbers. I think it's OK since it falls to the else case where the inactive select lanes are the passthrough value, so I've renamed the test to generate_sel_with_passthrough.
That being said, removing the matches for StoreVal which you suggested here and always generating a select removes the need for this else if clause anyway.
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
  else {
    // MaskedStore(Op, ptr, mask)->MaskedLoad(ptr, mask, passthrough) can be
    // replaced with MaskedStore(Op, ptr, mask)->select(mask, Op, passthrough)
    IRBuilder<> Builder(I);
It looks like GVN.cpp typically doesn't use IRBuilder and prefers to create instructions and insert them manually, i.e.
PHINode *Phi = PHINode::Create(CurInst->getType(), PredMap.size(),
CurInst->getName() + ".pre-phi");
Phi->insertBefore(CurrentBlock->begin());
Having said that, I don't see an actual problem with using IRBuilder.
I've replaced it with SelectInst::Create; it avoids an extra #include, which is nice.
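Roughly what the IRBuilder-free version could look like, reusing the operand names from the patch (a sketch, not necessarily the exact committed code):

// Lanes where the mask is false take the load's passthrough value.
OpToForward = SelectInst::Create(Mask, StoreVal, Passthrough, "",
                                 I->getIterator());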
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
    return false;

  Value *OpToForward = nullptr;
  if (match(StoreVal, m_MaskedLoad(m_Value(), m_Value(), m_Specific(Mask),
It feels a bit wrong to be testing the stored value here - it's an extra level of complexity that I don't think you need. I'd expect that if you always create a select like you do below, that instcombine would fold the select into the masked load anyway if the passthru matches. If instcombine doesn't do that, then perhaps worth adding that pattern?
As it stands I don't believe said InstCombine exists: https://godbolt.org/z/K835KeW1d. I think what you're proposing is a more elegant way to reach the end result though.
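For reference, a rough sketch of what such an InstCombine-style fold could look like, i.e. folding a select on the same mask into a one-use masked load's passthrough. This is illustrative only; it is not existing LLVM code, and the helper name is invented:

static Value *foldSelectIntoMaskedLoadPassthrough(SelectInst &Sel,
                                                  IRBuilderBase &Builder) {
  Value *Mask = Sel.getCondition();
  auto *Load = dyn_cast<IntrinsicInst>(Sel.getTrueValue());
  // Only fold a one-use masked load whose mask matches the select condition.
  if (!Load || Load->getIntrinsicID() != Intrinsic::masked_load ||
      !Load->hasOneUse() || Load->getOperand(2) != Mask)
    return nullptr;
  Align Alignment = cast<ConstantInt>(Load->getOperand(1))->getAlignValue();
  // Rebuild the load with the select's false value as its passthrough; the
  // disabled lanes then already produce that value, so the select goes away.
  Builder.SetInsertPoint(&Sel);
  return Builder.CreateMaskedLoad(Load->getType(), Load->getOperand(0),
                                  Alignment, Mask, Sel.getFalseValue());
}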
MDevereau left a comment
Thanks @david-arm, I think I've responded to each of your comments
david-arm left a comment
It's looking pretty good now, thanks for addressing all the previous comments! I just have a few more minor comments, then I think it's ready to go.
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
  Value *Passthrough = I->getOperand(3);
  Value *StoreVal;
  if (!match(DepInst, m_MaskedStore(m_Value(StoreVal), m_Value(), m_Value(),
                                    m_Specific(Mask))))
I guess a future optimisation might permit load masks that are a known subset of the store mask, or indeed a masked load of any arbitrary store where the masked element can fit within the size of the store. But that's for a different day!
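Purely to illustrate that future direction (not part of this patch; the helper below is invented), a subset-mask check covering the easy cases might look like:

// Returns true if every lane enabled by LoadMask is also enabled by StoreMask.
// Only handles identical masks or constant fixed-width vector masks.
static bool isMaskSubset(Value *LoadMask, Value *StoreMask) {
  if (LoadMask == StoreMask)
    return true;
  auto *LC = dyn_cast<Constant>(LoadMask);
  auto *SC = dyn_cast<Constant>(StoreMask);
  auto *VT = dyn_cast<FixedVectorType>(LoadMask->getType());
  if (!LC || !SC || !VT || LoadMask->getType() != StoreMask->getType())
    return false;
  for (unsigned Lane = 0, E = VT->getNumElements(); Lane != E; ++Lane) {
    auto *LE = dyn_cast_or_null<ConstantInt>(LC->getAggregateElement(Lane));
    auto *SE = dyn_cast_or_null<ConstantInt>(SC->getAggregateElement(Lane));
    if (!LE || !SE)
      return false;
    if (LE->isOne() && !SE->isOne())
      return false; // the load reads a lane the store did not write
  }
  return true;
}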
llvm/lib/Transforms/Scalar/GVN.cpp
Outdated
  ICF->removeUsersOf(I);
  I->replaceAllUsesWith(OpToForward);
  salvageAndRemoveInstruction(I);
  if (OpToForward->getType()->isPtrOrPtrVectorTy())
Do we need a test for this, i.e. loading/storing a vector of pointers?
After looking into it, invalidateCachedPointerInfo only handles scalar pointers and just bails on pointer vectors. Therefore I don't think we need to call it here, since masked loads can only be vector types. I've removed the check for a pointer/vector pointer type.
  ret <vscale x 4 x float> %load2
}

define <vscale x 4 x float> @generate_sel_with_passthrough(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) {
This test looks the same as the one above I think?
There is a difference, which is that the passthrough on %load2 is emitted by the select even when %load1 has a zeroinitializer passthrough. I put this test in when I thought different passthroughs required a bailout, but I'm not sure that there's value in having both anymore.
  %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer)
  %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16
  call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask0)
  %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask1, <vscale x 4 x float> %passthrough)
In theory, we could still remove the load because mask1 is a subset of mask0 and the pointer is the same. We'd just need to create a select based on mask1 and passthrough.
Again, doesn't have to be done in this patch!
  ret <vscale x 4 x float> %load.1.0
}

define <vscale x 4 x float> @store_mask_differs(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) {
It seems to me the main difference between this test and the one above is that here there isn't any chance to optimise the final load because the size of the stored value is less than the loaded one due to mask1 being smaller than mask0.
This was just to check that a mask that differs in any way, even if one mask is a subset of the other, gets rejected. Are you asking for any changes to either test?
|
|
declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32, <128 x i1>, <128 x i8>)
declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32, <128 x i1>)

define <4 x float> @forward_masked_load(ptr %0, ptr %1) {
Might be worth having a test for an arbitrary mask as well that's passed in as a function argument, i.e. we can apply the optimisation even if the mask isn't known to come from get.active.lane.mask.
I've added forward_masked_load_arbitrary_mask
I've added an extra test.
david-arm left a comment
LGTM!