
Commit c3bcf04

Revert "Require reassoc"
This reverts commit 319852132602f685aea6228f10418370fd530aa7.
1 parent 39e962b · commit c3bcf04

File tree: 2 files changed, +9 -15 lines


llvm/lib/IR/Verifier.cpp

Lines changed: 0 additions & 6 deletions
@@ -6561,12 +6561,6 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
     }
     break;
   }
-  case Intrinsic::vector_partial_reduce_fadd: {
-    Check(Call.hasAllowReassoc(),
-          "vector_partial_reduce_fadd requires reassociation to be allowed.");
-    // Fall through to perform the same verification checks as for integers.
-    [[fallthrough]];
-  }
   case Intrinsic::vector_partial_reduce_add: {
     VectorType *AccTy = cast<VectorType>(Call.getArgOperand(0)->getType());
     VectorType *VecTy = cast<VectorType>(Call.getArgOperand(1)->getType());
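
With this check removed, the verifier once again accepts calls to @llvm.vector.partial.reduce.fadd that carry no fast-math flags, as the updated tests below reflect. A minimal standalone sketch (hypothetical function name, mirroring the fixed-width test form) of IR that now verifies:

define <4 x float> @example_no_reassoc(<4 x float> %acc, <8 x float> %in) {
entry:
  ; No reassoc (or any other fast-math flag) on the call; the reverted
  ; verifier check would previously have rejected this.
  %partial.reduce = call <4 x float> @llvm.vector.partial.reduce.fadd(<4 x float> %acc, <8 x float> %in)
  ret <4 x float> %partial.reduce
}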

llvm/test/CodeGen/AArch64/sve2p1-fdot.ll

Lines changed: 9 additions & 9 deletions
@@ -10,7 +10,7 @@ entry:
   %a.wide = fpext <vscale x 8 x half> %a to <vscale x 8 x float>
   %b.wide = fpext <vscale x 8 x half> %b to <vscale x 8 x float>
   %mult = fmul <vscale x 8 x float> %a.wide, %b.wide
-  %partial.reduce = call reassoc <vscale x 4 x float> @llvm.vector.partial.reduce.fadd(<vscale x 4 x float> %acc, <vscale x 8 x float> %mult)
+  %partial.reduce = call <vscale x 4 x float> @llvm.vector.partial.reduce.fadd(<vscale x 4 x float> %acc, <vscale x 8 x float> %mult)
   ret <vscale x 4 x float> %partial.reduce
 }

@@ -40,7 +40,7 @@ entry:
   %a.wide = fpext <16 x half> %a to <16 x float>
   %b.wide = fpext <16 x half> %b to <16 x float>
   %mult = fmul <16 x float> %a.wide, %b.wide
-  %partial.reduce = call reassoc <8 x float> @llvm.vector.partial.reduce.fadd(<8 x float> %acc, <16 x float> %mult)
+  %partial.reduce = call <8 x float> @llvm.vector.partial.reduce.fadd(<8 x float> %acc, <16 x float> %mult)
   store <8 x float> %partial.reduce, ptr %accptr
   ret void
 }
@@ -61,7 +61,7 @@ entry:
   %a.wide = fpext <8 x half> %a to <8 x float>
   %b.wide = fpext <8 x half> %b to <8 x float>
   %mult = fmul <8 x float> %a.wide, %b.wide
-  %partial.reduce = call reassoc <4 x float> @llvm.vector.partial.reduce.fadd(<4 x float> %acc, <8 x float> %mult)
+  %partial.reduce = call <4 x float> @llvm.vector.partial.reduce.fadd(<4 x float> %acc, <8 x float> %mult)
   ret <4 x float> %partial.reduce
 }

@@ -72,7 +72,7 @@ define <8 x half> @partial_reduce_half(<8 x half> %acc, <16 x half> %a) {
 ; CHECK-NEXT: fadd v0.8h, v0.8h, v2.8h
 ; CHECK-NEXT: ret
 entry:
-  %partial.reduce = call reassoc <8 x half> @llvm.vector.partial.reduce.fadd(<8 x half> %acc, <16 x half> %a)
+  %partial.reduce = call <8 x half> @llvm.vector.partial.reduce.fadd(<8 x half> %acc, <16 x half> %a)
   ret <8 x half> %partial.reduce
 }

@@ -83,7 +83,7 @@ define <4 x float> @partial_reduce_float(<4 x float> %acc, <8 x float> %a) {
 ; CHECK-NEXT: fadd v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT: ret
 entry:
-  %partial.reduce = call reassoc <4 x float> @llvm.vector.partial.reduce.fadd(<4 x float> %acc, <8 x float> %a)
+  %partial.reduce = call <4 x float> @llvm.vector.partial.reduce.fadd(<4 x float> %acc, <8 x float> %a)
   ret <4 x float> %partial.reduce
 }

@@ -94,7 +94,7 @@ define <2 x double> @partial_reduce_double(<2 x double> %acc, <4 x double> %a) {
 ; CHECK-NEXT: fadd v0.2d, v0.2d, v2.2d
 ; CHECK-NEXT: ret
 entry:
-  %partial.reduce = call reassoc <2 x double> @llvm.vector.partial.reduce.fadd(<2 x double> %acc, <4 x double> %a)
+  %partial.reduce = call <2 x double> @llvm.vector.partial.reduce.fadd(<2 x double> %acc, <4 x double> %a)
   ret <2 x double> %partial.reduce
 }

@@ -105,7 +105,7 @@ define <vscale x 8 x half> @partial_reduce_half_vl128(<vscale x 8 x half> %acc,
 ; CHECK-NEXT: fadd z0.h, z0.h, z2.h
 ; CHECK-NEXT: ret
 entry:
-  %partial.reduce = call reassoc <vscale x 8 x half> @llvm.vector.partial.reduce.fadd(<vscale x 8 x half> %acc, <vscale x 16 x half> %a)
+  %partial.reduce = call <vscale x 8 x half> @llvm.vector.partial.reduce.fadd(<vscale x 8 x half> %acc, <vscale x 16 x half> %a)
   ret <vscale x 8 x half> %partial.reduce
 }

@@ -116,7 +116,7 @@ define <vscale x 4 x float> @partial_reduce_float_vl128(<vscale x 4 x float> %ac
 ; CHECK-NEXT: fadd z0.s, z0.s, z2.s
 ; CHECK-NEXT: ret
 entry:
-  %partial.reduce = call reassoc <vscale x 4 x float> @llvm.vector.partial.reduce.fadd(<vscale x 4 x float> %acc, <vscale x 8 x float> %a)
+  %partial.reduce = call <vscale x 4 x float> @llvm.vector.partial.reduce.fadd(<vscale x 4 x float> %acc, <vscale x 8 x float> %a)
   ret <vscale x 4 x float> %partial.reduce
 }

@@ -127,6 +127,6 @@ define <vscale x 2 x double> @partial_reduce_double_vl128(<vscale x 2 x double>
 ; CHECK-NEXT: fadd z0.d, z0.d, z2.d
 ; CHECK-NEXT: ret
 entry:
-  %partial.reduce = call reassoc <vscale x 2 x double> @llvm.vector.partial.reduce.fadd(<vscale x 2 x double> %acc, <vscale x 4 x double> %a)
+  %partial.reduce = call <vscale x 2 x double> @llvm.vector.partial.reduce.fadd(<vscale x 2 x double> %acc, <vscale x 4 x double> %a)
   ret <vscale x 2 x double> %partial.reduce
 }
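
Conversely, nothing in this revert forbids the flag: reassoc remains an ordinary, optional fast-math flag on such calls. A sketch (hypothetical function name, mirroring the pre-revert test form) that should still verify:

define <2 x double> @example_with_reassoc(<2 x double> %acc, <4 x double> %in) {
entry:
  ; The reassoc fast-math flag may still be supplied; after this revert
  ; it is optional rather than mandatory.
  %partial.reduce = call reassoc <2 x double> @llvm.vector.partial.reduce.fadd(<2 x double> %acc, <4 x double> %in)
  ret <2 x double> %partial.reduce
}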
