@@ -1582,9 +1582,9 @@ define <4 x float> @test_v4f32_fneg_fmadd(<4 x float> %a0, <4 x float> %a1, <4 x
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
 ; AVX512-NEXT:    retq
-  %mul = fmul contract contract nsz <4 x float> %a0, %a1
-  %add = fadd contract contract nsz <4 x float> %mul, %a2
-  %neg = fsub contract contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %add
+  %mul = fmul contract nsz <4 x float> %a0, %a1
+  %add = fadd contract nsz <4 x float> %mul, %a2
+  %neg = fsub contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %add
   ret <4 x float> %neg
 }

@@ -1624,10 +1624,10 @@ define <4 x float> @test_v4f32_fneg_fnmadd(<4 x float> %a0, <4 x float> %a1, <4
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2
 ; AVX512-NEXT:    retq
-  %mul = fmul contract contract nsz <4 x float> %a0, %a1
-  %neg0 = fsub contract contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %mul
-  %add = fadd contract contract nsz <4 x float> %neg0, %a2
-  %neg1 = fsub contract contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %add
+  %mul = fmul contract nsz <4 x float> %a0, %a1
+  %neg0 = fsub contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %mul
+  %add = fadd contract nsz <4 x float> %neg0, %a2
+  %neg1 = fsub contract nsz <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %add
   ret <4 x float> %neg1
 }

@@ -1646,10 +1646,10 @@ define <4 x double> @test_v4f64_fneg_fnmsub(<4 x double> %a0, <4 x double> %a1,
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
 ; AVX512-NEXT:    retq
-  %mul = fmul contract contract nsz <4 x double> %a0, %a1
-  %neg0 = fsub contract contract nsz <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %mul
-  %sub = fsub contract contract nsz <4 x double> %neg0, %a2
-  %neg1 = fsub contract contract nsz <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %sub
+  %mul = fmul contract nsz <4 x double> %a0, %a1
+  %neg0 = fsub contract nsz <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %mul
+  %sub = fsub contract nsz <4 x double> %neg0, %a2
+  %neg1 = fsub contract nsz <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %sub
   ret <4 x double> %neg1
 }

@@ -1672,9 +1672,9 @@ define <4 x float> @test_v4f32_fma_x_c1_fmul_x_c2(<4 x float> %x) {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
-  %m0 = fmul contract contract reassoc <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
-  %m1 = fmul contract contract reassoc <4 x float> %x, <float 4.0, float 3.0, float 2.0, float 1.0>
-  %a = fadd contract contract reassoc <4 x float> %m0, %m1
+  %m0 = fmul contract reassoc <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
+  %m1 = fmul contract reassoc <4 x float> %x, <float 4.0, float 3.0, float 2.0, float 1.0>
+  %a = fadd contract reassoc <4 x float> %m0, %m1
   ret <4 x float> %a
 }

@@ -1697,9 +1697,9 @@ define <4 x float> @test_v4f32_fma_fmul_x_c1_c2_y(<4 x float> %x, <4 x float> %y
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
 ; AVX512-NEXT:    retq
-  %m0 = fmul contract contract reassoc <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
-  %m1 = fmul contract contract reassoc <4 x float> %m0, <float 4.0, float 3.0, float 2.0, float 1.0>
-  %a = fadd contract contract reassoc <4 x float> %m1, %y
+  %m0 = fmul contract reassoc <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 4.0>
+  %m1 = fmul contract reassoc <4 x float> %m0, <float 4.0, float 3.0, float 2.0, float 1.0>
+  %a = fadd contract reassoc <4 x float> %m1, %y
   ret <4 x float> %a
 }

@@ -1723,8 +1723,8 @@ define double @test_f64_fneg_fmul(double %x, double %y) {
 ; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
 ; AVX512-NEXT:    retq
-  %m = fmul contract contract nsz double %x, %y
-  %n = fsub contract contract double -0.0, %m
+  %m = fmul contract nsz double %x, %y
+  %n = fsub contract double -0.0, %m
   ret double %n
 }

@@ -1746,8 +1746,8 @@ define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) {
 ; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
 ; AVX512-NEXT:    retq
-  %m = fmul contract contract nsz <4 x float> %x, %y
-  %n = fsub contract contract <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m
+  %m = fmul contract nsz <4 x float> %x, %y
+  %n = fsub contract <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m
   ret <4 x float> %n
 }

@@ -1769,8 +1769,8 @@ define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) {
 ; AVX512-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vfnmsub213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2
 ; AVX512-NEXT:    retq
-  %m = fmul contract contract nsz <4 x double> %x, %y
-  %n = fsub contract contract <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  %m = fmul contract nsz <4 x double> %x, %y
+  %n = fsub contract <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
   ret <4 x double> %n
 }

@@ -1792,8 +1792,8 @@ define <4 x double> @test_v4f64_fneg_fmul_no_nsz(<4 x double> %x, <4 x double> %
 ; AVX512-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
-  %m = fmul contract contract <4 x double> %x, %y
-  %n = fsub contract contract <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  %m = fmul contract <4 x double> %x, %y
+  %n = fsub contract <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
   ret <4 x double> %n
 }

@@ -1848,7 +1848,7 @@ define float @fadd_fma_fmul_fmf(float %a, float %b, float %c, float %d, float %n
 ; AVX512-NEXT:    retq
   %m1 = fmul contract float %a, %b
   %m2 = fmul contract float %c, %d
-  %a1 = fadd contract contract float %m1, %m2
+  %a1 = fadd contract float %m1, %m2
   %a2 = fadd contract reassoc float %n0, %a1
   ret float %a2
 }
@@ -1878,8 +1878,8 @@ define float @fadd_fma_fmul_2(float %a, float %b, float %c, float %d, float %n0)
 ; AVX512-NEXT:    retq
   %m1 = fmul contract float %a, %b
   %m2 = fmul contract float %c, %d
-  %a1 = fadd contract contract float %m1, %m2
-  %a2 = fadd contract contract float %n0, %a1
+  %a1 = fadd contract float %m1, %m2
+  %a2 = fadd contract float %n0, %a1
   ret float %a2
 }
