2 changes: 2 additions & 0 deletions deps/llvm.mk
@@ -497,6 +497,8 @@ $(eval $(call LLVM_PATCH,llvm-PR278923)) # Issue #19976, Remove for 4.0
 $(eval $(call LLVM_PATCH,llvm-D28759-loopclearance))
 $(eval $(call LLVM_PATCH,llvm-D28786-callclearance))
 $(eval $(call LLVM_PATCH,llvm-rL293230-icc17-cmake)) # Remove for 4.0
+$(eval $(call LLVM_PATCH,llvm-D32593))
+$(eval $(call LLVM_PATCH,llvm-D33179))
 endif # LLVM_VER

 ifeq ($(LLVM_VER),3.7.1)
83 changes: 83 additions & 0 deletions deps/patches/llvm-D32593.patch
@@ -0,0 +1,83 @@
From 5eeab81d22e07b6e12821067fced590f534c251a Mon Sep 17 00:00:00 2001
From: Keno Fischer <[email protected]>
Date: Thu, 27 Apr 2017 14:33:33 -0400
Subject: [PATCH] [SROA] Fix crash due to bad bitcast

Summary:
As shown in the test case, SROA was crashing when trying to split
stores (to the alloca) of loads (from anywhere), because it assumed
the pointer operand to the loads and stores had to have the same
address space. This isn't the case. Make sure to use the correct
pointer type for both the load and the store.

Reviewers: chandlerc, majnemer, sanjoy

Subscribers: arsenm, llvm-commits

Differential Revision: https://reviews.llvm.org/D32593
---
lib/Transforms/Scalar/SROA.cpp | 7 ++++---
test/Transforms/SROA/address-spaces.ll | 18 ++++++++++++++++++
2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index d01e91a..610d5a8 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -3697,7 +3697,8 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
int Idx = 0, Size = Offsets.Splits.size();
for (;;) {
auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
- auto *PartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
+ auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
+ auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());

// Either lookup a split load or create one.
LoadInst *PLoad;
@@ -3708,7 +3709,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
PLoad = IRB.CreateAlignedLoad(
getAdjustedPtr(IRB, DL, LoadBasePtr,
APInt(DL.getPointerSizeInBits(), PartOffset),
- PartPtrTy, LoadBasePtr->getName() + "."),
+ LoadPartPtrTy, LoadBasePtr->getName() + "."),
getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false,
LI->getName());
}
@@ -3718,7 +3719,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
StoreInst *PStore = IRB.CreateAlignedStore(
PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr,
APInt(DL.getPointerSizeInBits(), PartOffset),
- PartPtrTy, StoreBasePtr->getName() + "."),
+ StorePartPtrTy, StoreBasePtr->getName() + "."),
getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false);

// Now build a new slice for the alloca.
diff --git a/test/Transforms/SROA/address-spaces.ll b/test/Transforms/SROA/address-spaces.ll
index 119f225..8fba30c 100644
--- a/test/Transforms/SROA/address-spaces.ll
+++ b/test/Transforms/SROA/address-spaces.ll
@@ -83,3 +83,21 @@ define void @pr27557() {
store i32 addrspace(3)* @l, i32 addrspace(3)** %3, align 8
ret void
}
+
+; Make sure pre-splitting doesn't try to introduce an illegal bitcast
+define float @presplit(i64 addrspace(1)* %p) {
+entry:
+; CHECK-LABEL: @presplit(
+; CHECK: %[[CAST:.*]] = bitcast i64 addrspace(1)* {{.*}} to i32 addrspace(1)*
+; CHECK: load i32, i32 addrspace(1)* %[[CAST]]
+ %b = alloca i64
+ %b.cast = bitcast i64* %b to [2 x float]*
+ %b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
+ %b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
+ %l = load i64, i64 addrspace(1)* %p
+ store i64 %l, i64* %b
+ %f1 = load float, float* %b.gep1
+ %f2 = load float, float* %b.gep2
+ %ret = fadd float %f1, %f2
+ ret float %ret
+}
--
2.9.3
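
The sketch below gives a C-level picture of the situation the @presplit test above exercises. It is a hypothetical reproducer shape, assuming Clang's address_space type attribute and an optimization level at which SROA's pre-splitting runs; the global_i64 typedef and the union-based type punning are illustrative choices, not taken from the patch.

// Hypothetical reproducer shape for the D32593 crash (assumption: compiled
// with Clang at -O1 or higher so SROA pre-splits the i64 store into the two
// float parts). The i64 load reads from address space 1 while the alloca is
// in address space 0, so the split load and store need different pointer types.
typedef __attribute__((address_space(1))) long long global_i64;

float presplit(global_i64 *p) {
  union {
    long long i;  // lowers to the i64 alloca in the IR test
    float f[2];   // read back as two float slices
  } b;            // C-style type punning, used only for illustration
  b.i = *p;                 // i64 load from addrspace(1), stored to the alloca
  return b.f[0] + b.f[1];   // the float loads force SROA to pre-split the store
}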

64 changes: 64 additions & 0 deletions deps/patches/llvm-D33179.patch
@@ -0,0 +1,64 @@
From b1a005ba688397ca360e89cd6c6f51f232d6c25e Mon Sep 17 00:00:00 2001
From: Keno Fischer <[email protected]>
Date: Fri, 19 May 2017 18:42:20 -0400
Subject: [PATCH] [Sink] Fix predicate in legality check

Summary:
isSafeToSpeculativelyExecute is the wrong predicate to use here.
All that checks for is whether it is safe to hoist a value due to
unaligned/un-dereferencable accesses. However, not only are we doing
sinking rather than hoisting, our concern is that the location
we're loading from may have been modified. Instead forbid sinking
any load across a critical edge.

Reviewers: majnemer

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D33179
---
lib/Transforms/Scalar/Sink.cpp | 2 +-
test/Transforms/Sink/badloadsink.ll | 18 ++++++++++++++++++
2 files changed, 19 insertions(+), 1 deletion(-)
create mode 100644 test/Transforms/Sink/badloadsink.ll

diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 102e9ea..5210f16 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -114,7 +114,7 @@ static bool IsAcceptableTarget(Instruction *Inst, BasicBlock *SuccToSinkTo,
if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
- if (!isSafeToSpeculativelyExecute(Inst))
+ if (isa<LoadInst>(Inst))
return false;

// We don't want to sink across a critical edge if we don't dominate the
diff --git a/test/Transforms/Sink/badloadsink.ll b/test/Transforms/Sink/badloadsink.ll
new file mode 100644
index 0000000..e3f4884
--- /dev/null
+++ b/test/Transforms/Sink/badloadsink.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -basicaa -sink -S | FileCheck %s
+declare void @foo(i64 *)
+define i64 @sinkload(i1 %cmp) {
+; CHECK-LABEL: @sinkload
+top:
+ %a = alloca i64
+; CHECK: call void @foo(i64* %a)
+; CHECK-NEXT: %x = load i64, i64* %a
+ call void @foo(i64* %a)
+ %x = load i64, i64* %a
+ br i1 %cmp, label %A, label %B
+A:
+ store i64 0, i64 *%a
+ br label %B
+B:
+; CHECK-NOT: load i64, i64 *%a
+ ret i64 %x
+}
--
2.9.3
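
For a C-level counterpart of what badloadsink.ll guards against, here is an illustrative sketch (an assumption, not code from the patch; foo is presumed to initialize its out-parameter, mirroring the role of @foo in the test). With the old isSafeToSpeculativelyExecute check, the Sink pass could move the load of a below the branch, past the store in block A, and change the returned value; restricting the check to loads keeps it above the critical edge.

// Illustrative C counterpart of badloadsink.ll (assumption: foo writes to *out).
void foo(long long *out);

long long sinkload(int cmp) {
  long long a;
  foo(&a);            // a is initialized through the pointer
  long long x = a;    // this load must not be sunk past the branch...
  if (cmp)
    a = 0;            // ...because this store may overwrite a
  return x;           // a sunk load here would wrongly see 0 when cmp is nonzero
}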