diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0dfbb91f2ac54..3a5da379a9c49 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2567,7 +2567,8 @@ bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI,
   SrcReg = OriginalSrcReg;
   LLT DstTy = MRI.getType(DstReg);
   return mi_match(SrcReg, MRI,
-                  m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
+                  m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy)))) &&
+         canReplaceReg(DstReg, Reg, MRI);
 }
 
 bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI,
@@ -2577,7 +2578,8 @@ bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI,
   Register SrcReg = MI.getOperand(1).getReg();
   LLT DstTy = MRI.getType(DstReg);
   if (mi_match(SrcReg, MRI,
-               m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
+               m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy)))) &&
+      canReplaceReg(DstReg, Reg, MRI)) {
     unsigned DstSize = DstTy.getScalarSizeInBits();
     unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
     return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
index da47aaf8a3b5c..36653867fbba0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -180,5 +180,5 @@ def AMDGPURegBankCombiner : GICombiner<
   [unmerge_merge, unmerge_cst, unmerge_undef, zext_trunc_fold,
    int_minmax_to_med3, ptr_add_immed_chain,
    fp_minmax_to_clamp, fp_minmax_to_med3, fmed3_intrinsic_to_clamp,
-   redundant_and]> {
+   identity_combines, redundant_and]> {
 }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
index ff5880819020d..38374d1689366 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
@@ -640,7 +640,6 @@ define amdgpu_ps i32 @s_saddo_i32(i32 inreg %a, i32 inreg %b) {
 ; GFX7-NEXT: s_cmp_lt_i32 s1, 0
 ; GFX7-NEXT: s_cselect_b32 s1, 1, 0
 ; GFX7-NEXT: s_xor_b32 s0, s1, s0
-; GFX7-NEXT: s_and_b32 s0, s0, 1
 ; GFX7-NEXT: s_add_i32 s0, s2, s0
 ; GFX7-NEXT: ; return to shader part epilog
 ;
@@ -652,7 +651,6 @@ define amdgpu_ps i32 @s_saddo_i32(i32 inreg %a, i32 inreg %b) {
 ; GFX8-NEXT: s_cmp_lt_i32 s1, 0
 ; GFX8-NEXT: s_cselect_b32 s1, 1, 0
 ; GFX8-NEXT: s_xor_b32 s0, s1, s0
-; GFX8-NEXT: s_and_b32 s0, s0, 1
 ; GFX8-NEXT: s_add_i32 s0, s2, s0
 ; GFX8-NEXT: ; return to shader part epilog
 ;
@@ -664,7 +662,6 @@ define amdgpu_ps i32 @s_saddo_i32(i32 inreg %a, i32 inreg %b) {
 ; GFX9-NEXT: s_cmp_lt_i32 s1, 0
 ; GFX9-NEXT: s_cselect_b32 s1, 1, 0
 ; GFX9-NEXT: s_xor_b32 s0, s1, s0
-; GFX9-NEXT: s_and_b32 s0, s0, 1
 ; GFX9-NEXT: s_add_i32 s0, s2, s0
 ; GFX9-NEXT: ; return to shader part epilog
 %saddo = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
@@ -749,8 +746,6 @@ define amdgpu_ps <2 x i32> @s_saddo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b
 ; GFX7-NEXT: s_cselect_b32 s3, 1, 0
 ; GFX7-NEXT: s_xor_b32 s0, s2, s0
 ; GFX7-NEXT: s_xor_b32 s1, s3, s1
-; GFX7-NEXT: s_and_b32 s0, s0, 1
-; GFX7-NEXT: s_and_b32 s1, s1, 1
 ; GFX7-NEXT: s_add_i32 s0, s4, s0
 ; GFX7-NEXT: s_add_i32 s1, s5, s1
 ; GFX7-NEXT: ; return to shader part epilog
@@ -769,8 +764,6 @@ define amdgpu_ps <2 x i32> @s_saddo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b
 ; GFX8-NEXT: s_cselect_b32 s3, 1, 0
 ; GFX8-NEXT: s_xor_b32 s0, s2, s0
 ; GFX8-NEXT: s_xor_b32 s1, s3, s1
-; GFX8-NEXT: s_and_b32 s0, s0, 1
-; GFX8-NEXT: s_and_b32 s1,
s1, 1 ; GFX8-NEXT: s_add_i32 s0, s4, s0 ; GFX8-NEXT: s_add_i32 s1, s5, s1 ; GFX8-NEXT: ; return to shader part epilog @@ -789,8 +782,6 @@ define amdgpu_ps <2 x i32> @s_saddo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b ; GFX9-NEXT: s_cselect_b32 s3, 1, 0 ; GFX9-NEXT: s_xor_b32 s0, s2, s0 ; GFX9-NEXT: s_xor_b32 s1, s3, s1 -; GFX9-NEXT: s_and_b32 s0, s0, 1 -; GFX9-NEXT: s_and_b32 s1, s1, 1 ; GFX9-NEXT: s_add_i32 s0, s4, s0 ; GFX9-NEXT: s_add_i32 s1, s5, s1 ; GFX9-NEXT: ; return to shader part epilog diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.set.inactive.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.set.inactive.ll index ee89b28a0d2bb..2c44d719d0b45 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.set.inactive.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.set.inactive.ll @@ -106,7 +106,6 @@ define amdgpu_kernel void @set_inactive_scc(ptr addrspace(1) %out, i32 %in, <4 x ; GCN-NEXT: s_mov_b32 s2, 0 ; GCN-NEXT: .LBB4_2: ; %Flow ; GCN-NEXT: s_xor_b32 s2, s2, 1 -; GCN-NEXT: s_and_b32 s2, s2, 1 ; GCN-NEXT: s_cmp_lg_u32 s2, 0 ; GCN-NEXT: s_cbranch_scc1 .LBB4_4 ; GCN-NEXT: ; %bb.3: ; %.zero diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll index a354c072aa150..c295a662704e9 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll @@ -36,7 +36,6 @@ define amdgpu_kernel void @localize_constants(i1 %cond) { ; GFX9-NEXT: s_mov_b32 s0, 0 ; GFX9-NEXT: .LBB0_2: ; %Flow ; GFX9-NEXT: s_xor_b32 s0, s0, 1 -; GFX9-NEXT: s_and_b32 s0, s0, 1 ; GFX9-NEXT: s_cmp_lg_u32 s0, 0 ; GFX9-NEXT: s_cbranch_scc1 .LBB0_4 ; GFX9-NEXT: ; %bb.3: ; %bb0 @@ -121,7 +120,6 @@ define amdgpu_kernel void @localize_globals(i1 %cond) { ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: .LBB1_2: ; %Flow ; GFX9-NEXT: s_xor_b32 s0, s0, 1 -; GFX9-NEXT: s_and_b32 s0, s0, 1 ; GFX9-NEXT: s_cmp_lg_u32 s0, 0 ; GFX9-NEXT: s_cbranch_scc1 .LBB1_4 ; GFX9-NEXT: ; %bb.3: ; %bb0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll index 755eb13a61e14..5240bf4f3a1d7 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll @@ -356,7 +356,6 @@ define amdgpu_ps void @and_i1_scc(i32 inreg %a, i32 inreg %b, ptr addrspace(1) % ; OLD_RBS-NEXT: s_cmp_ge_u32 s1, 20 ; OLD_RBS-NEXT: s_cselect_b32 s3, 1, 0 ; OLD_RBS-NEXT: s_and_b32 s2, s2, s3 -; OLD_RBS-NEXT: s_and_b32 s2, s2, 1 ; OLD_RBS-NEXT: s_cmp_lg_u32 s2, 0 ; OLD_RBS-NEXT: s_cselect_b32 s0, s0, s1 ; OLD_RBS-NEXT: v_mov_b32_e32 v2, s0 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll index 4bfd29430ff1e..694a81a9668f3 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll @@ -1077,7 +1077,6 @@ define amdgpu_ps i24 @s_saddsat_i24(i24 inreg %lhs, i24 inreg %rhs) { ; GFX8-NEXT: s_xor_b32 s0, s1, s0 ; GFX8-NEXT: s_ashr_i32 s1, s3, 23 ; GFX8-NEXT: s_add_i32 s1, s1, 0xff800000 -; GFX8-NEXT: s_and_b32 s0, s0, 1 ; GFX8-NEXT: s_cmp_lg_u32 s0, 0 ; GFX8-NEXT: s_cselect_b32 s0, s1, s2 ; GFX8-NEXT: ; return to shader part epilog diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll index 2a200259a93d2..4031fe0be2823 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll @@ -171,17 
+171,17 @@ define i64 @v_sdiv_i64(i64 %num, i64 %den) { ; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 ; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CHECK-NEXT: v_mul_hi_u32 v0, v4, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v0, v2 -; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v4, v1 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; CHECK-NEXT: v_sub_i32_e64 v3, s[4:5], v1, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_mul_lo_u32 v3, v0, v2 +; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v0 +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, v4, v3 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v3, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc +; CHECK-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc +; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v0 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v3, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[6:7] ; CHECK-NEXT: s_setpc_b64 s[30:31] %result = sdiv i64 %num, %den @@ -335,7 +335,6 @@ define amdgpu_ps i64 @s_sdiv_i64(i64 inreg %num, i64 inreg %den) { ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: .LBB1_3: ; %Flow ; CHECK-NEXT: s_xor_b32 s0, s0, 1 -; CHECK-NEXT: s_and_b32 s0, s0, 1 ; CHECK-NEXT: s_cmp_lg_u32 s0, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB1_5 ; CHECK-NEXT: ; %bb.4: @@ -809,17 +808,17 @@ define <2 x i64> @v_sdiv_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: v_mul_hi_u32 v1, v0, v1 ; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CGP-NEXT: v_mul_hi_u32 v0, v10, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v0, v4 -; CGP-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v10, v1 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v4 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; CGP-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v4 -; CGP-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; CGP-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v4 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_mul_lo_u32 v2, v0, v4 +; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v10, v2 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; CGP-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v4 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CGP-NEXT: .LBB2_4: ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: v_or_b32_e32 v3, v9, v7 @@ -981,17 +980,17 @@ define <2 x i64> @v_sdiv_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 ; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 ; CGP-NEXT: v_mul_hi_u32 v2, v8, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v2, v6 -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v8, v3 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; CGP-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v6 -; CGP-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; CGP-NEXT: v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_mul_lo_u32 v4, v2, v6 +; CGP-NEXT: v_add_i32_e32 v5, vcc, 1, v2 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v8, v4 +; 
CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v6 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc +; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v4, v6 +; CGP-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; CGP-NEXT: v_add_i32_e32 v5, vcc, 1, v2 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v6 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: s_setpc_b64 s[30:31] %result = sdiv <2 x i64> %num, %den @@ -1817,17 +1816,17 @@ define i64 @v_sdiv_i64_pow2_shl_denom(i64 %x, i64 %y) { ; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 ; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CHECK-NEXT: v_mul_hi_u32 v0, v3, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v0, v5 -; CHECK-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v3, v1 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; CHECK-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; CHECK-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_mul_lo_u32 v2, v0, v5 +; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v0 +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, v3, v2 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc +; CHECK-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[6:7] ; CHECK-NEXT: s_setpc_b64 s[30:31] %shl.y = shl i64 4096, %y @@ -2279,17 +2278,17 @@ define <2 x i64> @v_sdiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: v_mul_hi_u32 v1, v0, v1 ; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CGP-NEXT: v_mul_hi_u32 v0, v8, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v0, v11 -; CGP-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v8, v1 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v11 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; CGP-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v11 -; CGP-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; CGP-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v11 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_mul_lo_u32 v2, v0, v11 +; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v8, v2 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v11 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; CGP-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v11 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v11 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CGP-NEXT: .LBB8_4: ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: v_or_b32_e32 v3, v7, v10 @@ -2453,17 +2452,17 @@ define <2 x i64> @v_sdiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 ; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 ; CGP-NEXT: v_mul_hi_u32 v2, v5, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v2, v9 -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v5, v3 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; CGP-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v9 -; CGP-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; CGP-NEXT: 
v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_mul_lo_u32 v4, v2, v9 +; CGP-NEXT: v_add_i32_e32 v6, vcc, 1, v2 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v5, v4 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v9 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc +; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v4, v9 +; CGP-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; CGP-NEXT: v_add_i32_e32 v5, vcc, 1, v2 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v9 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: s_setpc_b64 s[30:31] %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll index bac80f0777c02..8300e2542d452 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll @@ -1444,7 +1444,6 @@ define i65 @v_sext_inreg_i65_22(i65 %value) { ; GFX6-NEXT: v_lshrrev_b32_e32 v3, 10, v1 ; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 ; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 1 -; GFX6-NEXT: v_lshr_b64 v[0:1], v[0:1], 0 ; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GFX6-NEXT: v_bfe_u32 v1, v1, 0, 10 ; GFX6-NEXT: v_lshlrev_b32_e32 v4, 10, v2 @@ -1459,7 +1458,6 @@ define i65 @v_sext_inreg_i65_22(i65 %value) { ; GFX8-NEXT: v_lshrrev_b32_e32 v3, 10, v1 ; GFX8-NEXT: v_or_b32_e32 v2, v2, v3 ; GFX8-NEXT: v_bfe_i32 v2, v2, 0, 1 -; GFX8-NEXT: v_lshrrev_b64 v[0:1], 0, v[0:1] ; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GFX8-NEXT: v_bfe_u32 v1, v1, 0, 10 ; GFX8-NEXT: v_lshlrev_b32_e32 v4, 10, v2 @@ -1473,7 +1471,6 @@ define i65 @v_sext_inreg_i65_22(i65 %value) { ; GFX9-NEXT: v_lshlrev_b64 v[2:3], 22, v[2:3] ; GFX9-NEXT: v_lshrrev_b32_e32 v3, 10, v1 ; GFX9-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX9-NEXT: v_lshrrev_b64 v[0:1], 0, v[0:1] ; GFX9-NEXT: v_bfe_i32 v2, v2, 0, 1 ; GFX9-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GFX9-NEXT: v_bfe_u32 v1, v1, 0, 10 @@ -1486,9 +1483,8 @@ define i65 @v_sext_inreg_i65_22(i65 %value) { ; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10PLUS-NEXT: v_lshlrev_b64 v[2:3], 22, v[2:3] ; GFX10PLUS-NEXT: v_lshrrev_b32_e32 v3, 10, v1 -; GFX10PLUS-NEXT: v_lshrrev_b64 v[0:1], 0, v[0:1] -; GFX10PLUS-NEXT: v_or_b32_e32 v2, v2, v3 ; GFX10PLUS-NEXT: v_bfe_u32 v1, v1, 0, 10 +; GFX10PLUS-NEXT: v_or_b32_e32 v2, v2, v3 ; GFX10PLUS-NEXT: v_bfe_i32 v2, v2, 0, 1 ; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GFX10PLUS-NEXT: v_lshl_or_b32 v1, v2, 10, v1 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll index 2bb42308d935c..1a10f5fb7a5ce 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll @@ -167,15 +167,15 @@ define i64 @v_srem_i64(i64 %num, i64 %den) { ; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 ; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CHECK-NEXT: v_mul_hi_u32 v0, v4, v0 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 ; CHECK-NEXT: v_mul_lo_u32 v0, v0, v2 ; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v4, v0 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, v0, v2 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, v0, v2 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] ; CHECK-NEXT: s_setpc_b64 s[30:31] 
%result = srem i64 %num, %den @@ -327,7 +327,6 @@ define amdgpu_ps i64 @s_srem_i64(i64 inreg %num, i64 inreg %den) { ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: .LBB1_3: ; %Flow ; CHECK-NEXT: s_xor_b32 s0, s7, 1 -; CHECK-NEXT: s_and_b32 s0, s0, 1 ; CHECK-NEXT: s_cmp_lg_u32 s0, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB1_5 ; CHECK-NEXT: ; %bb.4: @@ -791,15 +790,15 @@ define <2 x i64> @v_srem_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: v_mul_hi_u32 v1, v0, v1 ; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CGP-NEXT: v_mul_hi_u32 v0, v10, v0 +; CGP-NEXT: v_mov_b32_e32 v1, 0 ; CGP-NEXT: v_mul_lo_u32 v0, v0, v4 ; CGP-NEXT: v_sub_i32_e32 v0, vcc, v10, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v4 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v0, v4 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v4 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v0, v4 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CGP-NEXT: .LBB2_4: ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: v_or_b32_e32 v3, v9, v7 @@ -959,15 +958,15 @@ define <2 x i64> @v_srem_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 ; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 ; CGP-NEXT: v_mul_hi_u32 v2, v8, v2 +; CGP-NEXT: v_mov_b32_e32 v3, 0 ; CGP-NEXT: v_mul_lo_u32 v2, v2, v6 ; CGP-NEXT: v_sub_i32_e32 v2, vcc, v8, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v6 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v6 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v6 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v6 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: s_setpc_b64 s[30:31] %result = srem <2 x i64> %num, %den @@ -2328,15 +2327,15 @@ define i64 @v_srem_i64_pow2_shl_denom(i64 %x, i64 %y) { ; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 ; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CHECK-NEXT: v_mul_hi_u32 v0, v3, v0 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 ; CHECK-NEXT: v_mul_lo_u32 v0, v0, v5 ; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v3, v0 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v5 +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, v0, v5 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, v0, v5 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] ; CHECK-NEXT: s_setpc_b64 s[30:31] %shl.y = shl i64 4096, %y @@ -2784,15 +2783,15 @@ define <2 x i64> @v_srem_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: v_mul_hi_u32 v1, v0, v1 ; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; CGP-NEXT: v_mul_hi_u32 v0, v8, v0 +; CGP-NEXT: v_mov_b32_e32 v1, 0 ; CGP-NEXT: v_mul_lo_u32 v0, v0, v11 ; CGP-NEXT: v_sub_i32_e32 v0, vcc, v8, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v11 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v0, v11 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v11 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc 
-; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v11 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v0, v11 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v11 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CGP-NEXT: .LBB8_4: ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: v_or_b32_e32 v3, v7, v10 @@ -2954,15 +2953,15 @@ define <2 x i64> @v_srem_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 ; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 ; CGP-NEXT: v_mul_hi_u32 v2, v5, v2 +; CGP-NEXT: v_mov_b32_e32 v3, 0 ; CGP-NEXT: v_mul_lo_u32 v2, v2, v9 ; CGP-NEXT: v_sub_i32_e32 v2, vcc, v5, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v9 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v9 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v9 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v9 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: s_setpc_b64 s[30:31] %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll index 5673a6c6e869d..9fac482cb01ba 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll @@ -1078,7 +1078,6 @@ define amdgpu_ps i24 @s_ssubsat_i24(i24 inreg %lhs, i24 inreg %rhs) { ; GFX8-NEXT: s_xor_b32 s0, s1, s0 ; GFX8-NEXT: s_ashr_i32 s1, s3, 23 ; GFX8-NEXT: s_add_i32 s1, s1, 0xff800000 -; GFX8-NEXT: s_and_b32 s0, s0, 1 ; GFX8-NEXT: s_cmp_lg_u32 s0, 0 ; GFX8-NEXT: s_cselect_b32 s0, s1, s2 ; GFX8-NEXT: ; return to shader part epilog diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll index 3741983a3067b..fe2667bc4c920 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/subo.ll @@ -640,7 +640,6 @@ define amdgpu_ps i32 @s_ssubo_i32(i32 inreg %a, i32 inreg %b) { ; GFX7-NEXT: s_cmp_gt_i32 s1, 0 ; GFX7-NEXT: s_cselect_b32 s1, 1, 0 ; GFX7-NEXT: s_xor_b32 s0, s1, s0 -; GFX7-NEXT: s_and_b32 s0, s0, 1 ; GFX7-NEXT: s_sub_i32 s0, s2, s0 ; GFX7-NEXT: ; return to shader part epilog ; @@ -652,7 +651,6 @@ define amdgpu_ps i32 @s_ssubo_i32(i32 inreg %a, i32 inreg %b) { ; GFX8-NEXT: s_cmp_gt_i32 s1, 0 ; GFX8-NEXT: s_cselect_b32 s1, 1, 0 ; GFX8-NEXT: s_xor_b32 s0, s1, s0 -; GFX8-NEXT: s_and_b32 s0, s0, 1 ; GFX8-NEXT: s_sub_i32 s0, s2, s0 ; GFX8-NEXT: ; return to shader part epilog ; @@ -664,7 +662,6 @@ define amdgpu_ps i32 @s_ssubo_i32(i32 inreg %a, i32 inreg %b) { ; GFX9-NEXT: s_cmp_gt_i32 s1, 0 ; GFX9-NEXT: s_cselect_b32 s1, 1, 0 ; GFX9-NEXT: s_xor_b32 s0, s1, s0 -; GFX9-NEXT: s_and_b32 s0, s0, 1 ; GFX9-NEXT: s_sub_i32 s0, s2, s0 ; GFX9-NEXT: ; return to shader part epilog %ssubo = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) @@ -749,8 +746,6 @@ define amdgpu_ps <2 x i32> @s_ssubo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b ; GFX7-NEXT: s_cselect_b32 s3, 1, 0 ; GFX7-NEXT: s_xor_b32 s0, s2, s0 ; GFX7-NEXT: s_xor_b32 s1, s3, s1 -; GFX7-NEXT: s_and_b32 s0, s0, 1 -; GFX7-NEXT: s_and_b32 s1, s1, 1 ; GFX7-NEXT: s_sub_i32 s0, s4, s0 ; GFX7-NEXT: s_sub_i32 s1, s5, s1 ; GFX7-NEXT: ; return to shader part epilog @@ -769,8 +764,6 @@ define amdgpu_ps <2 x i32>
@s_ssubo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b ; GFX8-NEXT: s_cselect_b32 s3, 1, 0 ; GFX8-NEXT: s_xor_b32 s0, s2, s0 ; GFX8-NEXT: s_xor_b32 s1, s3, s1 -; GFX8-NEXT: s_and_b32 s0, s0, 1 -; GFX8-NEXT: s_and_b32 s1, s1, 1 ; GFX8-NEXT: s_sub_i32 s0, s4, s0 ; GFX8-NEXT: s_sub_i32 s1, s5, s1 ; GFX8-NEXT: ; return to shader part epilog @@ -789,8 +782,6 @@ define amdgpu_ps <2 x i32> @s_ssubo_v2i32(<2 x i32> inreg %a, <2 x i32> inreg %b ; GFX9-NEXT: s_cselect_b32 s3, 1, 0 ; GFX9-NEXT: s_xor_b32 s0, s2, s0 ; GFX9-NEXT: s_xor_b32 s1, s3, s1 -; GFX9-NEXT: s_and_b32 s0, s0, 1 -; GFX9-NEXT: s_and_b32 s1, s1, 1 ; GFX9-NEXT: s_sub_i32 s0, s4, s0 ; GFX9-NEXT: s_sub_i32 s1, s5, s1 ; GFX9-NEXT: ; return to shader part epilog diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll index a292266fbbf0d..018e5fb6ee3b8 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll @@ -156,24 +156,24 @@ define i64 @v_udiv_i64(i64 %num, i64 %den) { ; CHECK-NEXT: s_cbranch_execz .LBB0_2 ; CHECK-NEXT: .LBB0_4: ; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v6 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, 0, v2 +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, 0, v2 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 ; CHECK-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CHECK-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v1, v0 -; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 -; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CHECK-NEXT: v_mul_lo_u32 v3, v3, v0 +; CHECK-NEXT: v_mul_hi_u32 v3, v0, v3 +; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3 ; CHECK-NEXT: v_mul_hi_u32 v0, v4, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v0, v2 -; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v4, v1 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; CHECK-NEXT: v_sub_i32_e64 v3, s[4:5], v1, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_mul_lo_u32 v3, v0, v2 +; CHECK-NEXT: v_add_i32_e32 v5, vcc, 1, v0 +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, v4, v3 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v3, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v5, vcc +; CHECK-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc +; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v0 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v3, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[6:7] ; CHECK-NEXT: s_setpc_b64 s[30:31] %result = udiv i64 %num, %den @@ -323,7 +323,6 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) { ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: .LBB1_3: ; %Flow ; CHECK-NEXT: s_xor_b32 s1, s6, 1 -; CHECK-NEXT: s_and_b32 s1, s1, 1 ; CHECK-NEXT: s_cmp_lg_u32 s1, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB1_5 ; CHECK-NEXT: ; %bb.4: @@ -765,24 +764,24 @@ define <2 x i64> @v_udiv_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: s_cbranch_execz .LBB2_4 ; CGP-NEXT: ; %bb.3: ; CGP-NEXT: v_rcp_iflag_f32_e32 v0, v2 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, 0, v4 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, 0, v4 +; CGP-NEXT: v_mov_b32_e32 v1, 0 ; CGP-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CGP-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v1, v0 -; CGP-NEXT: v_mul_hi_u32 v1, v0, v1 -; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CGP-NEXT: v_mul_lo_u32 v2, v2, 
v0 +; CGP-NEXT: v_mul_hi_u32 v2, v0, v2 +; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2 ; CGP-NEXT: v_mul_hi_u32 v0, v10, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v0, v4 -; CGP-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v10, v1 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v4 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; CGP-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v4 -; CGP-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; CGP-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v4 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_mul_lo_u32 v2, v0, v4 +; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v10, v2 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; CGP-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v4 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v4 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CGP-NEXT: .LBB2_4: ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: v_or_b32_e32 v3, v9, v7 @@ -931,24 +930,24 @@ define <2 x i64> @v_udiv_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: s_cbranch_execz .LBB2_6 ; CGP-NEXT: .LBB2_8: ; CGP-NEXT: v_rcp_iflag_f32_e32 v2, v4 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, 0, v6 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, 0, v6 +; CGP-NEXT: v_mov_b32_e32 v3, 0 ; CGP-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 ; CGP-NEXT: v_cvt_u32_f32_e32 v2, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v3, v2 -; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 -; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 +; CGP-NEXT: v_mul_lo_u32 v4, v4, v2 +; CGP-NEXT: v_mul_hi_u32 v4, v2, v4 +; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4 ; CGP-NEXT: v_mul_hi_u32 v2, v8, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v2, v6 -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v8, v3 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; CGP-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v6 -; CGP-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; CGP-NEXT: v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_mul_lo_u32 v4, v2, v6 +; CGP-NEXT: v_add_i32_e32 v5, vcc, 1, v2 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v8, v4 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v6 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc +; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v4, v6 +; CGP-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; CGP-NEXT: v_add_i32_e32 v5, vcc, 1, v2 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v6 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: s_setpc_b64 s[30:31] %result = udiv <2 x i64> %num, %den @@ -1219,24 +1218,24 @@ define i64 @v_udiv_i64_pow2_shl_denom(i64 %x, i64 %y) { ; CHECK-NEXT: s_cbranch_execz .LBB7_2 ; CHECK-NEXT: .LBB7_4: ; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v2 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, 0, v5 +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, 0, v5 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 ; CHECK-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CHECK-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v1, v0 -; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 -; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CHECK-NEXT: v_mul_lo_u32 v2, v2, v0 +; CHECK-NEXT: v_mul_hi_u32 v2, v0, v2 +; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2 ; CHECK-NEXT: v_mul_hi_u32 v0, v3, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v0, v5 -; CHECK-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; 
CHECK-NEXT: v_sub_i32_e32 v1, vcc, v3, v1 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; CHECK-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; CHECK-NEXT: v_add_i32_e32 v2, vcc, 1, v0 -; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v1, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_mul_lo_u32 v2, v0, v5 +; CHECK-NEXT: v_add_i32_e32 v4, vcc, 1, v0 +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, v3, v2 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc +; CHECK-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; CHECK-NEXT: v_add_i32_e32 v3, vcc, 1, v0 +; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v2, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[6:7] ; CHECK-NEXT: s_setpc_b64 s[30:31] %shl.y = shl i64 4096, %y @@ -1657,24 +1656,24 @@ define <2 x i64> @v_udiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: s_cbranch_execz .LBB8_4 ; CGP-NEXT: ; %bb.3: ; CGP-NEXT: v_rcp_iflag_f32_e32 v0, v4 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, 0, v2 +; CGP-NEXT: v_sub_i32_e32 v3, vcc, 0, v2 +; CGP-NEXT: v_mov_b32_e32 v1, 0 ; CGP-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CGP-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v1, v0 -; CGP-NEXT: v_mul_hi_u32 v1, v0, v1 -; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CGP-NEXT: v_mul_lo_u32 v3, v3, v0 +; CGP-NEXT: v_mul_hi_u32 v3, v0, v3 +; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v3 ; CGP-NEXT: v_mul_hi_u32 v0, v8, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v0, v2 -; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v8, v1 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; CGP-NEXT: v_sub_i32_e64 v3, s[4:5], v1, v2 -; CGP-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; CGP-NEXT: v_add_i32_e32 v3, vcc, 1, v0 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v1, v2 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_mul_lo_u32 v3, v0, v2 +; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v0 +; CGP-NEXT: v_sub_i32_e32 v3, vcc, v8, v3 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v2 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc +; CGP-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v2 +; CGP-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc +; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v0 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v2 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; CGP-NEXT: .LBB8_4: ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: v_or_b32_e32 v3, v7, v10 @@ -1823,24 +1822,24 @@ define <2 x i64> @v_udiv_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: s_cbranch_execz .LBB8_6 ; CGP-NEXT: .LBB8_8: ; CGP-NEXT: v_rcp_iflag_f32_e32 v2, v4 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, 0, v9 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, 0, v9 +; CGP-NEXT: v_mov_b32_e32 v3, 0 ; CGP-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 ; CGP-NEXT: v_cvt_u32_f32_e32 v2, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v3, v2 -; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 -; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 +; CGP-NEXT: v_mul_lo_u32 v4, v4, v2 +; CGP-NEXT: v_mul_hi_u32 v4, v2, v4 +; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4 ; CGP-NEXT: v_mul_hi_u32 v2, v5, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v2, v9 -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v5, v3 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; CGP-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v9 -; 
CGP-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; CGP-NEXT: v_add_i32_e32 v4, vcc, 1, v2 -; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v3, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc -; CGP-NEXT: v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_mul_lo_u32 v4, v2, v9 +; CGP-NEXT: v_add_i32_e32 v6, vcc, 1, v2 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v5, v4 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v9 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc +; CGP-NEXT: v_sub_i32_e64 v5, s[4:5], v4, v9 +; CGP-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; CGP-NEXT: v_add_i32_e32 v5, vcc, 1, v2 +; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v4, v9 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[6:7] ; CGP-NEXT: s_setpc_b64 s[30:31] %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll index f29c2c2484456..51d5253f87920 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll @@ -155,22 +155,22 @@ define i64 @v_urem_i64(i64 %num, i64 %den) { ; CHECK-NEXT: s_cbranch_execz .LBB0_2 ; CHECK-NEXT: .LBB0_4: ; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v6 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, 0, v2 +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, 0, v2 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 ; CHECK-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CHECK-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v1, v0 -; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 -; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CHECK-NEXT: v_mul_lo_u32 v3, v3, v0 +; CHECK-NEXT: v_mul_hi_u32 v3, v0, v3 +; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v3 ; CHECK-NEXT: v_mul_hi_u32 v0, v4, v0 ; CHECK-NEXT: v_mul_lo_u32 v0, v0, v2 ; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v4, v0 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, v0, v2 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; CHECK-NEXT: v_sub_i32_e32 v3, vcc, v0, v2 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] ; CHECK-NEXT: s_setpc_b64 s[30:31] %result = urem i64 %num, %den @@ -319,7 +319,6 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) { ; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1 ; CHECK-NEXT: .LBB1_3: ; %Flow ; CHECK-NEXT: s_xor_b32 s1, s6, 1 -; CHECK-NEXT: s_and_b32 s1, s1, 1 ; CHECK-NEXT: s_cmp_lg_u32 s1, 0 ; CHECK-NEXT: s_cbranch_scc1 .LBB1_5 ; CHECK-NEXT: ; %bb.4: @@ -756,22 +755,22 @@ define <2 x i64> @v_urem_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: s_cbranch_execz .LBB2_4 ; CGP-NEXT: ; %bb.3: ; CGP-NEXT: v_rcp_iflag_f32_e32 v0, v2 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, 0, v4 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, 0, v4 +; CGP-NEXT: v_mov_b32_e32 v1, 0 ; CGP-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CGP-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v1, v0 -; CGP-NEXT: v_mul_hi_u32 v1, v0, v1 -; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CGP-NEXT: v_mul_lo_u32 v2, v2, v0 +; CGP-NEXT: v_mul_hi_u32 v2, v0, v2 +; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v2 ; CGP-NEXT: v_mul_hi_u32 v0, v10, v0 ; CGP-NEXT: v_mul_lo_u32 v0, v0, v4 ; CGP-NEXT: v_sub_i32_e32 v0, vcc, v10, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v4 +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v0, v4 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4 -; CGP-NEXT: 
v_cndmask_b32_e32 v0, v0, v1, vcc -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v4 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; CGP-NEXT: v_sub_i32_e32 v2, vcc, v0, v4 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v4 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CGP-NEXT: .LBB2_4: ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: v_or_b32_e32 v3, v9, v7 @@ -919,22 +918,22 @@ define <2 x i64> @v_urem_v2i64(<2 x i64> %num, <2 x i64> %den) { ; CGP-NEXT: s_cbranch_execz .LBB2_6 ; CGP-NEXT: .LBB2_8: ; CGP-NEXT: v_rcp_iflag_f32_e32 v2, v4 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, 0, v6 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, 0, v6 +; CGP-NEXT: v_mov_b32_e32 v3, 0 ; CGP-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 ; CGP-NEXT: v_cvt_u32_f32_e32 v2, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v3, v2 -; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 -; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 +; CGP-NEXT: v_mul_lo_u32 v4, v4, v2 +; CGP-NEXT: v_mul_hi_u32 v4, v2, v4 +; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4 ; CGP-NEXT: v_mul_hi_u32 v2, v8, v2 ; CGP-NEXT: v_mul_lo_u32 v2, v2, v6 ; CGP-NEXT: v_sub_i32_e32 v2, vcc, v8, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v6 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v6 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v6 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v6 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v6 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: s_setpc_b64 s[30:31] %result = urem <2 x i64> %num, %den @@ -1644,22 +1643,22 @@ define i64 @v_urem_i64_pow2_shl_denom(i64 %x, i64 %y) { ; CHECK-NEXT: s_cbranch_execz .LBB7_2 ; CHECK-NEXT: .LBB7_4: ; CHECK-NEXT: v_rcp_iflag_f32_e32 v0, v2 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, 0, v5 +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, 0, v5 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 ; CHECK-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CHECK-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CHECK-NEXT: v_mul_lo_u32 v1, v1, v0 -; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 -; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CHECK-NEXT: v_mul_lo_u32 v2, v2, v0 +; CHECK-NEXT: v_mul_hi_u32 v2, v0, v2 +; CHECK-NEXT: v_add_i32_e32 v0, vcc, v0, v2 ; CHECK-NEXT: v_mul_hi_u32 v0, v3, v0 ; CHECK-NEXT: v_mul_lo_u32 v0, v0, v5 ; CHECK-NEXT: v_sub_i32_e32 v0, vcc, v3, v0 -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v5 +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, v0, v5 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_sub_i32_e32 v1, vcc, v0, v5 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; CHECK-NEXT: v_sub_i32_e32 v2, vcc, v0, v5 ; CHECK-NEXT: v_cmp_ge_u32_e32 vcc, v0, v5 -; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] ; CHECK-NEXT: s_setpc_b64 s[30:31] %shl.y = shl i64 4096, %y @@ -2077,22 +2076,22 @@ define <2 x i64> @v_urem_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: s_cbranch_execz .LBB8_4 ; CGP-NEXT: ; %bb.3: ; CGP-NEXT: v_rcp_iflag_f32_e32 v0, v4 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, 0, v2 +; CGP-NEXT: v_sub_i32_e32 v3, vcc, 0, v2 +; CGP-NEXT: v_mov_b32_e32 v1, 0 ; CGP-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; CGP-NEXT: v_cvt_u32_f32_e32 v0, v0 -; CGP-NEXT: v_mul_lo_u32 v1, v1, v0 -; CGP-NEXT: 
v_mul_hi_u32 v1, v0, v1 -; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; CGP-NEXT: v_mul_lo_u32 v3, v3, v0 +; CGP-NEXT: v_mul_hi_u32 v3, v0, v3 +; CGP-NEXT: v_add_i32_e32 v0, vcc, v0, v3 ; CGP-NEXT: v_mul_hi_u32 v0, v8, v0 ; CGP-NEXT: v_mul_lo_u32 v0, v0, v2 ; CGP-NEXT: v_sub_i32_e32 v0, vcc, v8, v0 -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 +; CGP-NEXT: v_sub_i32_e32 v3, vcc, v0, v2 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CGP-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; CGP-NEXT: v_sub_i32_e32 v3, vcc, v0, v2 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; CGP-NEXT: v_mov_b32_e32 v1, 0 +; CGP-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; CGP-NEXT: .LBB8_4: ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: v_or_b32_e32 v3, v7, v10 @@ -2240,22 +2239,22 @@ define <2 x i64> @v_urem_v2i64_pow2_shl_denom(<2 x i64> %x, <2 x i64> %y) { ; CGP-NEXT: s_cbranch_execz .LBB8_6 ; CGP-NEXT: .LBB8_8: ; CGP-NEXT: v_rcp_iflag_f32_e32 v2, v4 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, 0, v9 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, 0, v9 +; CGP-NEXT: v_mov_b32_e32 v3, 0 ; CGP-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 ; CGP-NEXT: v_cvt_u32_f32_e32 v2, v2 -; CGP-NEXT: v_mul_lo_u32 v3, v3, v2 -; CGP-NEXT: v_mul_hi_u32 v3, v2, v3 -; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v3 +; CGP-NEXT: v_mul_lo_u32 v4, v4, v2 +; CGP-NEXT: v_mul_hi_u32 v4, v2, v4 +; CGP-NEXT: v_add_i32_e32 v2, vcc, v2, v4 ; CGP-NEXT: v_mul_hi_u32 v2, v5, v2 ; CGP-NEXT: v_mul_lo_u32 v2, v2, v9 ; CGP-NEXT: v_sub_i32_e32 v2, vcc, v5, v2 -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v9 +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v9 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_sub_i32_e32 v3, vcc, v2, v9 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; CGP-NEXT: v_sub_i32_e32 v4, vcc, v2, v9 ; CGP-NEXT: v_cmp_ge_u32_e32 vcc, v2, v9 -; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; CGP-NEXT: v_mov_b32_e32 v3, 0 +; CGP-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; CGP-NEXT: s_or_b64 exec, exec, s[4:5] ; CGP-NEXT: s_setpc_b64 s[30:31] %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll index 23fbd91c35a5b..4fe11760e71fd 100644 --- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll +++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll @@ -1064,8 +1064,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64_with_select(ptr addrspace(1) no ; GFX9-GISEL-NEXT: global_load_ubyte v4, v1, s[2:3] offset:3 ; GFX9-GISEL-NEXT: global_load_ubyte v5, v1, s[2:3] offset:4 ; GFX9-GISEL-NEXT: global_load_ubyte v6, v1, s[2:3] offset:5 -; GFX9-GISEL-NEXT: global_load_ubyte v7, v1, s[2:3] offset:6 -; GFX9-GISEL-NEXT: global_load_ubyte v8, v1, s[2:3] offset:7 +; GFX9-GISEL-NEXT: global_load_ubyte v7, v1, s[2:3] offset:7 +; GFX9-GISEL-NEXT: global_load_ubyte v8, v1, s[2:3] offset:6 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(6) ; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 8, v0 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(5) @@ -1076,15 +1076,15 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i64_with_select(ptr addrspace(1) no ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(2) ; GFX9-GISEL-NEXT: v_lshl_or_b32 v4, v6, 8, v5 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(1) -; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v5, 24, v7 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) -; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v8, 24, v5 -; GFX9-GISEL-NEXT: v_or3_b32 v3, v0, v4, 0 -;
GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v2 -; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v4, v3 -; GFX9-GISEL-NEXT: v_add_u32_e32 v0, 32, v0 +; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v6, 16, v8 +; GFX9-GISEL-NEXT: v_or3_b32 v3, v5, v6, v4 +; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v4, v2 +; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v3 +; GFX9-GISEL-NEXT: v_add_u32_e32 v4, 32, v4 ; GFX9-GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3] -; GFX9-GISEL-NEXT: v_min_u32_e32 v0, v4, v0 +; GFX9-GISEL-NEXT: v_min_u32_e32 v0, v0, v4 ; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc ; GFX9-GISEL-NEXT: global_store_dwordx2 v1, v[0:1], s[0:1] ; GFX9-GISEL-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll index c4a742f4bf08d..9fcfbba6fb235 100644 --- a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll +++ b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll @@ -1028,8 +1028,8 @@ define amdgpu_kernel void @v_cttz_zero_undef_i64_with_select(ptr addrspace(1) no ; GFX9-GISEL-NEXT: global_load_ubyte v4, v1, s[2:3] offset:3 ; GFX9-GISEL-NEXT: global_load_ubyte v5, v1, s[2:3] offset:4 ; GFX9-GISEL-NEXT: global_load_ubyte v6, v1, s[2:3] offset:5 -; GFX9-GISEL-NEXT: global_load_ubyte v7, v1, s[2:3] offset:6 -; GFX9-GISEL-NEXT: global_load_ubyte v8, v1, s[2:3] offset:7 +; GFX9-GISEL-NEXT: global_load_ubyte v7, v1, s[2:3] offset:7 +; GFX9-GISEL-NEXT: global_load_ubyte v8, v1, s[2:3] offset:6 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(6) ; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 8, v0 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(5) @@ -1040,10 +1040,10 @@ define amdgpu_kernel void @v_cttz_zero_undef_i64_with_select(ptr addrspace(1) no ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(2) ; GFX9-GISEL-NEXT: v_lshl_or_b32 v4, v6, 8, v5 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(1) -; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v5, 24, v7 ; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) -; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v8, 24, v5 -; GFX9-GISEL-NEXT: v_or3_b32 v3, v0, v4, 0 +; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v6, 16, v8 +; GFX9-GISEL-NEXT: v_or3_b32 v3, v5, v6, v4 ; GFX9-GISEL-NEXT: v_ffbl_b32_e32 v4, v3 ; GFX9-GISEL-NEXT: v_ffbl_b32_e32 v0, v2 ; GFX9-GISEL-NEXT: v_add_u32_e32 v4, 32, v4 diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll index c383b90e1cdb1..06c0417211809 100644 --- a/llvm/test/CodeGen/AMDGPU/div_i128.ll +++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll @@ -1426,7 +1426,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] ; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v2 @@ -1623,10 +1623,8 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[6:7] ; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9 -; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f -; GFX9-G-O0-NEXT: s_mov_b32 s6, 0 -; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s7 -; GFX9-G-O0-NEXT: v_xor_b32_e64 v3, v3, s6 +; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f +; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s6 ; GFX9-G-O0-NEXT: v_or_b32_e64 v6, v6, v8 ; GFX9-G-O0-NEXT: v_or_b32_e64 v3, v3, v7 ; GFX9-G-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def 
$vgpr6_vgpr7 killed $exec @@ -1667,11 +1665,11 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec -; GFX9-G-O0-NEXT: ; implicit-def: $vgpr34 : SGPR spill to VGPR lane -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s4, 0 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s5, 1 +; GFX9-G-O0-NEXT: ; implicit-def: $vgpr31 : SGPR spill to VGPR lane +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s4, 0 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s5, 1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v31, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7] ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] @@ -1679,11 +1677,11 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB0_8 ; GFX9-G-O0-NEXT: .LBB0_1: ; %Flow ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s4, v34, 2 -; GFX9-G-O0-NEXT: v_readlane_b32 s5, v34, 3 +; GFX9-G-O0-NEXT: v_readlane_b32 s4, v31, 2 +; GFX9-G-O0-NEXT: v_readlane_b32 s5, v31, 3 ; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow ; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload @@ -1713,11 +1711,11 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB0_5 ; GFX9-G-O0-NEXT: .LBB0_3: ; %Flow2 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s4, v34, 0 -; GFX9-G-O0-NEXT: v_readlane_b32 s5, v34, 1 +; GFX9-G-O0-NEXT: v_readlane_b32 s4, v31, 0 +; GFX9-G-O0-NEXT: v_readlane_b32 s5, v31, 1 ; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload @@ -1733,10 +1731,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_branch .LBB0_9 ; GFX9-G-O0-NEXT: .LBB0_4: ; %udiv-loop-exit -; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: 
buffer_load_dword v14, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload @@ -1749,33 +1747,32 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7 ; GFX9-G-O0-NEXT: s_mov_b32 s4, 1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4 -; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3] +; GFX9-G-O0-NEXT: v_lshlrev_b64 v[9:10], v0, v[2:3] ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5] ; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec ; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s4, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4 -; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v5, v2, v3 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11 -; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v7 -; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v11 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v13 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v14 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v10 +; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v6 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v1, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9 -; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7 -; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v8 +; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v5, v6 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v2, v3 ; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec @@ -1789,11 +1786,11 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB0_3 ; GFX9-G-O0-NEXT: .LBB0_5: ; %Flow1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s4, v34, 4 -; GFX9-G-O0-NEXT: v_readlane_b32 s5, v34, 5 +; GFX9-G-O0-NEXT: v_readlane_b32 s4, v31, 4 +; GFX9-G-O0-NEXT: v_readlane_b32 s5, v31, 5 ; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v1, 
off, s[0:3], s32 offset:120 ; 4-byte Folded Reload @@ -1820,11 +1817,11 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: .LBB0_6: ; %udiv-do-while ; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s6, v34, 6 -; GFX9-G-O0-NEXT: v_readlane_b32 s7, v34, 7 +; GFX9-G-O0-NEXT: v_readlane_b32 s6, v31, 6 +; GFX9-G-O0-NEXT: v_readlane_b32 s7, v31, 7 ; GFX9-G-O0-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload @@ -1837,10 +1834,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload @@ -1854,70 +1851,65 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16) -; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v4 ; GFX9-G-O0-NEXT: s_mov_b32 s8, 1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8 -; GFX9-G-O0-NEXT: v_lshlrev_b64 v[14:15], v2, v[0:1] -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8 -; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4] -; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec +; GFX9-G-O0-NEXT: v_lshlrev_b64 v[2:3], v2, v[0:1] +; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s8 +; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v4, v[14:15] +; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr0 killed $exec ; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s9, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9 -; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5 -; GFX9-G-O0-NEXT: v_or_b32_e64 v7, 
v2, v3 -; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v4 +; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr4_vgpr5 killed $exec +; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v0, v1 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr22_vgpr23 killed $exec ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v24 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v25 -; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec +; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr0 killed $exec ; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s9, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9 -; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v14 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v15 -; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3 -; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v0, v1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v22 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v23 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v24 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v25 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8 -; GFX9-G-O0-NEXT: v_lshlrev_b64 v[26:27], v0, v[2:3] +; GFX9-G-O0-NEXT: v_lshlrev_b64 v[25:26], v0, v[2:3] ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s8 ; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[14:15] ; GFX9-G-O0-NEXT: ; kill: def $vgpr14 killed $vgpr2 killed $exec ; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s8, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8 -; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v22, v2, v3 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v15, v2, v3 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(10) -; GFX9-G-O0-NEXT: v_mov_b32_e32 v28, v30 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v27 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v28 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8) -; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v32 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v25, v33 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v28 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v29 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v26 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v27 -; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v23 -; GFX9-G-O0-NEXT: v_or_b32_e64 v15, v1, v15 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v29 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v24, v30 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v25 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v26 +; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v22 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v1, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v23, v24 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v25 -; GFX9-G-O0-NEXT: v_or3_b32 v14, v14, v22, v23 -; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v15 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v22, v23 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v24 +; GFX9-G-O0-NEXT: v_or3_b32 v14, v14, v15, v22 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v2, v3 ; GFX9-G-O0-NEXT: ; kill: def $vgpr14 killed $vgpr14 def $vgpr14_vgpr15 killed $exec ; GFX9-G-O0-NEXT: 
v_mov_b32_e32 v15, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec @@ -2004,13 +1996,13 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s6, 2 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s7, 3 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s6, 2 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s7, 3 ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s6, 6 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s7, 7 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s6, 6 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s7, 7 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v31, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_nop 0 @@ -2037,7 +2029,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB0_1 ; GFX9-G-O0-NEXT: .LBB0_7: ; %udiv-preheader ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload @@ -2122,10 +2114,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9] ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s8, 6 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s9, 7 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s8, 6 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s9, 7 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v31, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6 @@ -2154,7 +2146,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB0_6 ; GFX9-G-O0-NEXT: .LBB0_8: ; %udiv-bb1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload @@ -2267,10 +2259,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec ; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5] ; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s6, 4 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s7, 5 +; GFX9-G-O0-NEXT: v_writelane_b32 v31, s6, 4 +; GFX9-G-O0-NEXT: 
v_writelane_b32 v31, s7, 5 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[20:21], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v31, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[20:21] ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] ; GFX9-G-O0-NEXT: s_cbranch_execz .LBB0_5 @@ -2303,7 +2295,7 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v2, s[4:5], v2, v5, s[4:5] ; GFX9-G-O0-NEXT: v_subb_co_u32_e64 v3, s[4:5], v3, v4, s[4:5] ; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31] @@ -3556,7 +3548,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0: ; %bb.0: ; %_udiv-special-cases ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] ; GFX9-G-O0-NEXT: v_mov_b32_e32 v10, v1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v2 @@ -3707,10 +3699,8 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-G-O0-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[6:7] ; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v4, v9 -; GFX9-G-O0-NEXT: s_mov_b32 s7, 0x7f -; GFX9-G-O0-NEXT: s_mov_b32 s6, 0 -; GFX9-G-O0-NEXT: v_xor_b32_e64 v5, v5, s7 -; GFX9-G-O0-NEXT: v_xor_b32_e64 v6, v6, s6 +; GFX9-G-O0-NEXT: s_mov_b32 s6, 0x7f +; GFX9-G-O0-NEXT: v_xor_b32_e64 v5, v5, s6 ; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v5, v8 ; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v6, v7 ; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec @@ -3759,11 +3749,11 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], exec -; GFX9-G-O0-NEXT: ; implicit-def: $vgpr34 : SGPR spill to VGPR lane -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s4, 0 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s5, 1 +; GFX9-G-O0-NEXT: ; implicit-def: $vgpr32 : SGPR spill to VGPR lane +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s4, 0 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s5, 1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v32, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7] ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] @@ -3771,11 +3761,11 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB1_8 ; GFX9-G-O0-NEXT: .LBB1_1: ; %Flow ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s4, v34, 2 -; GFX9-G-O0-NEXT: v_readlane_b32 
s5, v34, 3 +; GFX9-G-O0-NEXT: v_readlane_b32 s4, v32, 2 +; GFX9-G-O0-NEXT: v_readlane_b32 s5, v32, 3 ; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-G-O0-NEXT: ; %bb.2: ; %Flow ; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload @@ -3805,11 +3795,11 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB1_5 ; GFX9-G-O0-NEXT: .LBB1_3: ; %Flow2 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s4, v34, 0 -; GFX9-G-O0-NEXT: v_readlane_b32 s5, v34, 1 +; GFX9-G-O0-NEXT: v_readlane_b32 s4, v32, 0 +; GFX9-G-O0-NEXT: v_readlane_b32 s5, v32, 1 ; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload @@ -3825,10 +3815,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_branch .LBB1_9 ; GFX9-G-O0-NEXT: .LBB1_4: ; %udiv-loop-exit -; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload @@ -3841,33 +3831,32 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v7 ; GFX9-G-O0-NEXT: s_mov_b32 s4, 1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4 -; GFX9-G-O0-NEXT: v_lshlrev_b64 v[10:11], v0, v[2:3] +; GFX9-G-O0-NEXT: v_lshlrev_b64 v[9:10], v0, v[2:3] ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-G-O0-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5] ; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr2 killed $exec ; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s4, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4 -; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v6, v2, v3 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v5, v2, v3 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v15 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v16 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v17 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v12 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v10 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v11 -; GFX9-G-O0-NEXT: 
v_or_b32_e64 v0, v0, v7 -; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v1, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v11 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v13 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v8, v14 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v9 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v10 +; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v6 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v1, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v7, v8 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v9 -; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v6, v7 -; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v7 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v8 +; GFX9-G-O0-NEXT: v_or3_b32 v4, v4, v5, v6 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v2, v3 ; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec ; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec @@ -3881,11 +3870,11 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB1_3 ; GFX9-G-O0-NEXT: .LBB1_5: ; %Flow1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s4, v34, 4 -; GFX9-G-O0-NEXT: v_readlane_b32 s5, v34, 5 +; GFX9-G-O0-NEXT: v_readlane_b32 s4, v32, 4 +; GFX9-G-O0-NEXT: v_readlane_b32 s5, v32, 5 ; GFX9-G-O0-NEXT: s_or_b64 exec, exec, s[4:5] ; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -3912,11 +3901,11 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: .LBB1_6: ; %udiv-do-while ; GFX9-G-O0-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) -; GFX9-G-O0-NEXT: v_readlane_b32 s6, v34, 6 -; GFX9-G-O0-NEXT: v_readlane_b32 s7, v34, 7 +; GFX9-G-O0-NEXT: v_readlane_b32 s6, v32, 6 +; GFX9-G-O0-NEXT: v_readlane_b32 s7, v32, 7 ; GFX9-G-O0-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload @@ -3929,10 +3918,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; 
GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; GFX9-G-O0-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload @@ -3946,36 +3935,32 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v3 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(16) -; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v4 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v5 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v4 ; GFX9-G-O0-NEXT: s_mov_b32 s8, 1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8 -; GFX9-G-O0-NEXT: v_lshlrev_b64 v[20:21], v2, v[0:1] -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8 -; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v2, v[3:4] -; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec +; GFX9-G-O0-NEXT: v_lshlrev_b64 v[2:3], v2, v[0:1] +; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, s8 +; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v4, v[20:21] +; GFX9-G-O0-NEXT: ; kill: def $vgpr7 killed $vgpr0 killed $exec ; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s9, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9 -; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v4 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5 -; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v2, v3 -; GFX9-G-O0-NEXT: v_or_b32_e64 v5, v0, v1 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v4 +; GFX9-G-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 killed $vgpr4_vgpr5 killed $exec +; GFX9-G-O0-NEXT: v_or_b32_e64 v7, v0, v1 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr12_vgpr13 killed $exec ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v14 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v15 -; GFX9-G-O0-NEXT: ; kill: def $vgpr2 killed $vgpr0 killed $exec +; GFX9-G-O0-NEXT: ; kill: def $vgpr4 killed $vgpr0 killed $exec ; GFX9-G-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr0_vgpr1 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s9, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s9 -; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v3, v0, v1 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v20 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v21 -; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v2, v3 -; GFX9-G-O0-NEXT: v_or_b32_e64 v9, v0, v1 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v1, v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-G-O0-NEXT: v_or_b32_e64 v4, v0, v1 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v12 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v13 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v14 @@ -3988,28 +3973,27 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 killed $vgpr2_vgpr3 killed $exec ; GFX9-G-O0-NEXT: s_mov_b32 s8, 31 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s8 -; GFX9-G-O0-NEXT: 
v_lshrrev_b32_e64 v14, v2, v3 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, 0 +; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v13, v2, v3 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v12, v0 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v1 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(10) -; GFX9-G-O0-NEXT: v_mov_b32_e32 v28, v30 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v29, v31 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v28 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v29 ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(8) -; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v32 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v33 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v28 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v29 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v22 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v23 -; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v15 -; GFX9-G-O0-NEXT: v_or_b32_e64 v13, v1, v13 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v20, v30 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v21, v31 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v1 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v22 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v23 +; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v14 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v1, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec -; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v13 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, v20 -; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v21 -; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v14, v15 -; GFX9-G-O0-NEXT: v_or3_b32 v2, v2, v3, v13 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v2 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, v20 +; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v21 +; GFX9-G-O0-NEXT: v_or3_b32 v12, v12, v13, v14 +; GFX9-G-O0-NEXT: v_or_b32_e64 v2, v2, v3 ; GFX9-G-O0-NEXT: ; kill: def $vgpr12 killed $vgpr12 def $vgpr12_vgpr13 killed $exec ; GFX9-G-O0-NEXT: v_mov_b32_e32 v13, v2 ; GFX9-G-O0-NEXT: ; kill: def $vgpr0_vgpr1 killed $vgpr0_vgpr1 def $vgpr0_vgpr1_vgpr2_vgpr3 killed $exec @@ -4104,13 +4088,13 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s6, 2 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s7, 3 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s6, 2 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s7, 3 ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[4:5] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s6, 6 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s7, 7 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s6, 6 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s7, 7 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v32, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_nop 0 @@ -4137,7 +4121,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB1_1 ; GFX9-G-O0-NEXT: .LBB1_7: ; %udiv-preheader ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 
4-byte Folded Reload @@ -4227,10 +4211,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 s[4:5], s[8:9] ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], s[8:9] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s8, 6 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s9, 7 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s8, 6 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s9, 7 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v32, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: v_mov_b32_e32 v15, s7 ; GFX9-G-O0-NEXT: v_mov_b32_e32 v14, s6 @@ -4259,7 +4243,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_branch .LBB1_6 ; GFX9-G-O0-NEXT: .LBB1_8: ; %udiv-bb1 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload @@ -4372,10 +4356,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_mov_b64 s[6:7], exec ; GFX9-G-O0-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5] ; GFX9-G-O0-NEXT: s_xor_b64 s[6:7], s[4:5], s[6:7] -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s6, 4 -; GFX9-G-O0-NEXT: v_writelane_b32 v34, s7, 5 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s6, 4 +; GFX9-G-O0-NEXT: v_writelane_b32 v32, s7, 5 ; GFX9-G-O0-NEXT: s_or_saveexec_b64 s[18:19], -1 -; GFX9-G-O0-NEXT: buffer_store_dword v34, off, s[0:3], s32 ; 4-byte Folded Spill +; GFX9-G-O0-NEXT: buffer_store_dword v32, off, s[0:3], s32 ; 4-byte Folded Spill ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[18:19] ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] ; GFX9-G-O0-NEXT: s_cbranch_execz .LBB1_5 @@ -4394,7 +4378,7 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) { ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v6 ; GFX9-G-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; GFX9-G-O0-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; GFX9-G-O0-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; GFX9-G-O0-NEXT: s_mov_b64 exec, s[4:5] ; GFX9-G-O0-NEXT: s_waitcnt vmcnt(0) ; GFX9-G-O0-NEXT: s_setpc_b64 s[30:31] diff --git a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll index 64c887d570e54..dbd5c3daebc49 100644 --- a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll +++ b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll @@ -917,7 +917,6 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i ; GFX9-GISEL-NEXT: s_mov_b32 s4, 0 ; GFX9-GISEL-NEXT: .LBB7_4: ; %Flow ; GFX9-GISEL-NEXT: s_xor_b32 s4, s4, 1 -; GFX9-GISEL-NEXT: s_and_b32 s4, s4, 1 ; GFX9-GISEL-NEXT: s_cmp_lg_u32 s4, 0 ; GFX9-GISEL-NEXT: s_cbranch_scc1 .LBB7_6 ; GFX9-GISEL-NEXT: ; %bb.5: ; %bb.0 @@ -1016,8 +1015,7 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i ; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-GISEL-NEXT: .LBB7_4: ; %Flow ; GFX11-GISEL-NEXT: s_xor_b32 s0, s0, 1 -; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | 
instid1(SALU_CYCLE_1) -; GFX11-GISEL-NEXT: s_and_b32 s0, s0, 1 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX11-GISEL-NEXT: s_cbranch_scc1 .LBB7_6 ; GFX11-GISEL-NEXT: ; %bb.5: ; %bb.0 diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll index 9c9c0555638fb..3465c782bd700 100644 --- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll +++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll @@ -219,9 +219,8 @@ define i128 @fptosi_f64_to_i128(double %x) { ; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0 -; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0 +; GISEL-NEXT: v_lshl_or_b32 v9, v0, 16, v0 ; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1 -; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0 ; GISEL-NEXT: v_mov_b32_e32 v0, 0x433 ; GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5 @@ -235,30 +234,29 @@ define i128 @fptosi_f64_to_i128(double %x) { ; GISEL-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6 ; GISEL-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5] ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7 -; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0 +; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v0, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v1, vcc +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v10, v9, 0 ; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffb8d, v6 ; GISEL-NEXT: v_sub_u32_e32 v2, 64, v7 ; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5] ; GISEL-NEXT: v_lshlrev_b64 v[4:5], v6, v[4:5] ; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v7 -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v11, v9, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0 +; GISEL-NEXT: v_cndmask_b32_e64 v12, v2, 0, s[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v10, v8, 0 ; GISEL-NEXT: v_mov_b32_e32 v2, v6 -; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10 +; GISEL-NEXT: v_mul_lo_u32 v6, v10, v9 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v10, v9, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v4, v11, v9 ; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2] +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v11, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11] ; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[4:5] ; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] -; GISEL-NEXT: ; implicit-def: $vgpr10 ; GISEL-NEXT: ; implicit-def: $vgpr9 ; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7] ; GISEL-NEXT: ; implicit-def: $vgpr6 @@ -275,13 +273,13 @@ define i128 @fptosi_f64_to_i128(double %x) { ; GISEL-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc -; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v10, 0 +; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v9, 0 ; GISEL-NEXT: v_cndmask_b32_e32 v5, v1, v5, vcc ; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v4, 
v8, 0 ; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3] -; GISEL-NEXT: v_mul_lo_u32 v6, v5, v10 +; GISEL-NEXT: v_mul_lo_u32 v6, v5, v9 ; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v4, v4, v10 +; GISEL-NEXT: v_mul_lo_u32 v4, v4, v9 ; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7] ; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v6, vcc @@ -585,9 +583,8 @@ define i128 @fptoui_f64_to_i128(double %x) { ; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0 -; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0 +; GISEL-NEXT: v_lshl_or_b32 v9, v0, 16, v0 ; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1 -; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0 ; GISEL-NEXT: v_mov_b32_e32 v0, 0x433 ; GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-NEXT: v_and_b32_e32 v2, 0xfffff, v5 @@ -601,30 +598,29 @@ define i128 @fptoui_f64_to_i128(double %x) { ; GISEL-NEXT: v_add_u32_e32 v7, 0xfffffbcd, v6 ; GISEL-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5] ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7 -; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0 +; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v0, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v1, vcc +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v10, v9, 0 ; GISEL-NEXT: v_add_u32_e32 v6, 0xfffffb8d, v6 ; GISEL-NEXT: v_sub_u32_e32 v2, 64, v7 ; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5] ; GISEL-NEXT: v_lshlrev_b64 v[4:5], v6, v[4:5] ; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v7 -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v11, v9, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0 +; GISEL-NEXT: v_cndmask_b32_e64 v12, v2, 0, s[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v10, v8, 0 ; GISEL-NEXT: v_mov_b32_e32 v2, v6 -; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10 +; GISEL-NEXT: v_mul_lo_u32 v6, v10, v9 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v10, v9, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v4, v11, v9 ; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2] +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v11, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11] ; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[4:5] ; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] -; GISEL-NEXT: ; implicit-def: $vgpr10 ; GISEL-NEXT: ; implicit-def: $vgpr9 ; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7] ; GISEL-NEXT: ; implicit-def: $vgpr6 @@ -641,13 +637,13 @@ define i128 @fptoui_f64_to_i128(double %x) { ; GISEL-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc -; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v10, 0 +; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v9, 0 ; GISEL-NEXT: v_cndmask_b32_e32 v5, v1, v5, vcc ; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v4, v8, 0 ; 
GISEL-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3] -; GISEL-NEXT: v_mul_lo_u32 v6, v5, v10 +; GISEL-NEXT: v_mul_lo_u32 v6, v5, v9 ; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v4, v4, v10 +; GISEL-NEXT: v_mul_lo_u32 v4, v4, v9 ; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7] ; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v6, vcc @@ -901,58 +897,56 @@ define i128 @fptosi_f32_to_i128(float %x) { ; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v0, v2 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v3 -; GISEL-NEXT: v_lshlrev_b16_e32 v5, 3, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v8, 3, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v3 -; GISEL-NEXT: v_or_b32_e32 v1, v1, v5 -; GISEL-NEXT: v_lshlrev_b16_e32 v8, 4, v0 -; GISEL-NEXT: v_or_b32_e32 v2, v2, v5 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v8 -; GISEL-NEXT: v_lshlrev_b16_e32 v9, 5, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v9, 4, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v8 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v9 -; GISEL-NEXT: v_lshlrev_b16_e32 v10, 6, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v10, 5, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v9 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v10 -; GISEL-NEXT: v_lshlrev_b16_e32 v11, 7, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v11, 6, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v10 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v11 -; GISEL-NEXT: v_lshlrev_b16_e32 v12, 8, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v12, 7, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v11 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v12 -; GISEL-NEXT: v_lshlrev_b16_e32 v13, 9, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v13, 8, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v12 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v13 -; GISEL-NEXT: v_lshlrev_b16_e32 v14, 10, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v14, 9, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v13 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v14 -; GISEL-NEXT: v_lshlrev_b16_e32 v15, 11, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v15, 10, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v14 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v15 -; GISEL-NEXT: v_lshlrev_b16_e32 v16, 12, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v16, 11, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v15 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v16 -; GISEL-NEXT: v_lshlrev_b16_e32 v17, 13, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v17, 12, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v16 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v17 -; GISEL-NEXT: v_lshlrev_b16_e32 v18, 14, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v18, 13, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v17 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v18 -; GISEL-NEXT: v_lshlrev_b16_e32 v0, 15, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v19, 14, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v18 +; GISEL-NEXT: v_or_b32_e32 v1, v1, v19 +; GISEL-NEXT: v_lshlrev_b16_e32 v0, 15, v0 +; GISEL-NEXT: v_or_b32_e32 v2, v2, v19 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v0 ; GISEL-NEXT: v_or_b32_e32 v0, v2, v0 ; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0 -; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0 -; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1 -; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0 +; GISEL-NEXT: v_lshl_or_b32 v8, v0, 16, v0 +; GISEL-NEXT: v_or3_b32 v9, v1, v2, 1 ; GISEL-NEXT: v_mov_b32_e32 v0, 0x96 ; GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4 ; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1] ; GISEL-NEXT: v_or_b32_e32 v4, 0x800000, v2 -; GISEL-NEXT: v_mov_b32_e32 v5, 0 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GISEL-NEXT: 
s_and_saveexec_b64 s[6:7], vcc ; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7] @@ -961,35 +955,34 @@ define i128 @fptosi_f32_to_i128(float %x) { ; GISEL-NEXT: v_add_u32_e32 v7, 0xffffff6a, v6 ; GISEL-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5] ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7 -; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0 +; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v0, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v1, vcc +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v10, v8, 0 ; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff2a, v6 ; GISEL-NEXT: v_sub_u32_e32 v2, 64, v7 ; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5] ; GISEL-NEXT: v_lshlrev_b64 v[4:5], v6, v[4:5] ; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v7 -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v11, v8, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0 +; GISEL-NEXT: v_cndmask_b32_e64 v12, v2, 0, s[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v10, v9, 0 ; GISEL-NEXT: v_mov_b32_e32 v2, v6 -; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10 +; GISEL-NEXT: v_mul_lo_u32 v6, v10, v8 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v10, v8, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v4, v11, v8 ; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2] +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v11, v9, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11] ; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[4:5] ; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] -; GISEL-NEXT: ; implicit-def: $vgpr10 -; GISEL-NEXT: ; implicit-def: $vgpr9 -; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7] +; GISEL-NEXT: ; implicit-def: $vgpr8 +; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7] ; GISEL-NEXT: ; implicit-def: $vgpr6 ; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 -; GISEL-NEXT: ; implicit-def: $vgpr8 +; GISEL-NEXT: ; implicit-def: $vgpr9 ; GISEL-NEXT: .LBB2_4: ; %Flow ; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17] ; GISEL-NEXT: s_cbranch_execz .LBB2_6 @@ -1000,10 +993,10 @@ define i128 @fptosi_f32_to_i128(float %x) { ; GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v8, 0 -; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v10, 0 -; GISEL-NEXT: v_mul_lo_u32 v5, v4, v10 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0 +; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v8, 0 +; GISEL-NEXT: v_mul_lo_u32 v5, v4, v8 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc ; GISEL-NEXT: .LBB2_6: ; %Flow1 ; GISEL-NEXT: s_or_b64 exec, exec, s[6:7] @@ -1255,58 +1248,56 @@ define i128 @fptoui_f32_to_i128(float %x) { ; GISEL-NEXT: v_lshlrev_b16_e32 v3, 2, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v0, v2 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v3 
-; GISEL-NEXT: v_lshlrev_b16_e32 v5, 3, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v8, 3, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v3 -; GISEL-NEXT: v_or_b32_e32 v1, v1, v5 -; GISEL-NEXT: v_lshlrev_b16_e32 v8, 4, v0 -; GISEL-NEXT: v_or_b32_e32 v2, v2, v5 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v8 -; GISEL-NEXT: v_lshlrev_b16_e32 v9, 5, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v9, 4, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v8 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v9 -; GISEL-NEXT: v_lshlrev_b16_e32 v10, 6, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v10, 5, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v9 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v10 -; GISEL-NEXT: v_lshlrev_b16_e32 v11, 7, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v11, 6, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v10 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v11 -; GISEL-NEXT: v_lshlrev_b16_e32 v12, 8, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v12, 7, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v11 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v12 -; GISEL-NEXT: v_lshlrev_b16_e32 v13, 9, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v13, 8, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v12 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v13 -; GISEL-NEXT: v_lshlrev_b16_e32 v14, 10, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v14, 9, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v13 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v14 -; GISEL-NEXT: v_lshlrev_b16_e32 v15, 11, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v15, 10, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v14 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v15 -; GISEL-NEXT: v_lshlrev_b16_e32 v16, 12, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v16, 11, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v15 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v16 -; GISEL-NEXT: v_lshlrev_b16_e32 v17, 13, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v17, 12, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v16 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v17 -; GISEL-NEXT: v_lshlrev_b16_e32 v18, 14, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v18, 13, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v17 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v18 -; GISEL-NEXT: v_lshlrev_b16_e32 v0, 15, v0 +; GISEL-NEXT: v_lshlrev_b16_e32 v19, 14, v0 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v18 +; GISEL-NEXT: v_or_b32_e32 v1, v1, v19 +; GISEL-NEXT: v_lshlrev_b16_e32 v0, 15, v0 +; GISEL-NEXT: v_or_b32_e32 v2, v2, v19 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v0 ; GISEL-NEXT: v_or_b32_e32 v0, v2, v0 ; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0 -; GISEL-NEXT: v_lshl_or_b32 v10, v0, 16, v0 -; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1 -; GISEL-NEXT: v_or3_b32 v9, v0, v2, 0 +; GISEL-NEXT: v_lshl_or_b32 v8, v0, 16, v0 +; GISEL-NEXT: v_or3_b32 v9, v1, v2, 1 ; GISEL-NEXT: v_mov_b32_e32 v0, 0x96 ; GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-NEXT: v_and_b32_e32 v2, 0x7fffff, v4 ; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[6:7], v[0:1] ; GISEL-NEXT: v_or_b32_e32 v4, 0x800000, v2 -; GISEL-NEXT: v_mov_b32_e32 v5, 0 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc ; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7] @@ -1315,35 +1306,34 @@ define i128 @fptoui_f32_to_i128(float %x) { ; GISEL-NEXT: v_add_u32_e32 v7, 0xffffff6a, v6 ; GISEL-NEXT: v_lshlrev_b64 v[0:1], v7, v[4:5] ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7 -; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v10, 0 +; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v0, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v1, vcc +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v10, v8, 0 ; GISEL-NEXT: v_add_u32_e32 v6, 
0xffffff2a, v6 ; GISEL-NEXT: v_sub_u32_e32 v2, 64, v7 ; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5] ; GISEL-NEXT: v_lshlrev_b64 v[4:5], v6, v[4:5] ; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v7 -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v11, v8, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v8, v[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0 +; GISEL-NEXT: v_cndmask_b32_e64 v12, v2, 0, s[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v10, v9, 0 ; GISEL-NEXT: v_mov_b32_e32 v2, v6 -; GISEL-NEXT: v_mul_lo_u32 v6, v11, v10 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v4, v12, v10 +; GISEL-NEXT: v_mul_lo_u32 v6, v10, v8 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v10, v8, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v4, v11, v8 ; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2] +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v11, v9, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11] ; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[4:5] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[4:5] ; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] -; GISEL-NEXT: ; implicit-def: $vgpr10 -; GISEL-NEXT: ; implicit-def: $vgpr9 -; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7] +; GISEL-NEXT: ; implicit-def: $vgpr8 +; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7] ; GISEL-NEXT: ; implicit-def: $vgpr6 ; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5 -; GISEL-NEXT: ; implicit-def: $vgpr8 +; GISEL-NEXT: ; implicit-def: $vgpr9 ; GISEL-NEXT: .LBB3_4: ; %Flow ; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17] ; GISEL-NEXT: s_cbranch_execz .LBB3_6 @@ -1354,10 +1344,10 @@ define i128 @fptoui_f32_to_i128(float %x) { ; GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 ; GISEL-NEXT: v_cndmask_b32_e32 v4, v0, v4, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v8, 0 -; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v10, 0 -; GISEL-NEXT: v_mul_lo_u32 v5, v4, v10 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0 +; GISEL-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v4, v8, 0 +; GISEL-NEXT: v_mul_lo_u32 v5, v4, v8 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc ; GISEL-NEXT: .LBB3_6: ; %Flow1 ; GISEL-NEXT: s_or_b64 exec, exec, s[6:7] @@ -1674,66 +1664,65 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) { ; GISEL-NEXT: v_or_b32_e32 v1, v1, v18 ; GISEL-NEXT: v_or_b32_e32 v0, v0, v19 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v19 -; GISEL-NEXT: v_and_b32_e32 v11, 0xffff, v0 +; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v11 -; GISEL-NEXT: v_or3_b32 v9, v1, v0, 1 -; GISEL-NEXT: v_or3_b32 v10, v11, v0, 0 +; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0 +; GISEL-NEXT: v_lshl_or_b32 v9, v0, 16, v0 +; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1 ; GISEL-NEXT: v_mov_b32_e32 v0, 0x86 ; GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-NEXT: v_and_b32_e32 v2, 0x7f, v4 ; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[5:6], v[0:1] -; GISEL-NEXT: v_or_b32_e32 v7, 0x80, v2 -; GISEL-NEXT: v_mov_b32_e32 v8, 
0 +; GISEL-NEXT: v_mov_b32_e32 v7, 0 +; GISEL-NEXT: v_or_b32_e32 v6, 0x80, v2 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc ; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7] ; GISEL-NEXT: s_cbranch_execz .LBB6_4 ; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else -; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff7a, v5 -; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[7:8] -; GISEL-NEXT: v_add_u32_e32 v4, 0xffffff3a, v5 -; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6 -; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6 -; GISEL-NEXT: v_lshl_or_b32 v11, v11, 16, v11 -; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[7:8] -; GISEL-NEXT: v_lshlrev_b64 v[4:5], v4, v[7:8] -; GISEL-NEXT: v_cndmask_b32_e32 v8, 0, v0, vcc +; GISEL-NEXT: v_add_u32_e32 v10, 0xffffff7a, v5 +; GISEL-NEXT: v_lshlrev_b64 v[0:1], v10, v[6:7] +; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v10 +; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v8, v11, 0 -; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6 +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v9, 0 +; GISEL-NEXT: v_add_u32_e32 v4, 0xffffff3a, v5 +; GISEL-NEXT: v_sub_u32_e32 v2, 64, v10 +; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[6:7] +; GISEL-NEXT: v_lshlrev_b64 v[4:5], v4, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v10, v[0:1] -; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v8, v9, 0 -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[6:7] -; GISEL-NEXT: v_mul_lo_u32 v4, v12, v11 -; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc +; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v10 +; GISEL-NEXT: v_cndmask_b32_e64 v10, v2, 0, s[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v10, v8, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0 ; GISEL-NEXT: v_mov_b32_e32 v2, v6 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v8, v10, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v6, v8, v11 -; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v9, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v6, v11, v9 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v4, v12, v9 +; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11] ; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v10, v[4:5] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v10, v9, v[4:5] +; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] ; GISEL-NEXT: ; implicit-def: $vgpr5 -; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7] -; GISEL-NEXT: ; implicit-def: $vgpr7_vgpr8 -; GISEL-NEXT: ; implicit-def: $vgpr9 +; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7] +; GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GISEL-NEXT: ; implicit-def: $vgpr8 ; GISEL-NEXT: .LBB6_4: ; %Flow ; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17] ; GISEL-NEXT: s_cbranch_execz .LBB6_6 ; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12 ; GISEL-NEXT: v_sub_co_u32_e32 v2, vcc, 0x86, v5 -; GISEL-NEXT: v_lshrrev_b64 v[0:1], v2, v[7:8] +; GISEL-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7] ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2 ; GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 -; GISEL-NEXT: v_cndmask_b32_e32 v0, v0, 
v7, vcc -; GISEL-NEXT: v_mul_hi_i32_i24_e32 v1, v0, v9 +; GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc +; GISEL-NEXT: v_mul_hi_i32_i24_e32 v1, v0, v8 ; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v9 +; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v8 ; GISEL-NEXT: v_mov_b32_e32 v3, v2 ; GISEL-NEXT: .LBB6_6: ; %Flow1 ; GISEL-NEXT: s_or_b64 exec, exec, s[6:7] @@ -2022,66 +2011,65 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) { ; GISEL-NEXT: v_or_b32_e32 v1, v1, v18 ; GISEL-NEXT: v_or_b32_e32 v0, v0, v19 ; GISEL-NEXT: v_or_b32_e32 v1, v1, v19 -; GISEL-NEXT: v_and_b32_e32 v11, 0xffff, v0 +; GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v11 -; GISEL-NEXT: v_or3_b32 v9, v1, v0, 1 -; GISEL-NEXT: v_or3_b32 v10, v11, v0, 0 +; GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v0 +; GISEL-NEXT: v_lshl_or_b32 v9, v0, 16, v0 +; GISEL-NEXT: v_or3_b32 v8, v1, v2, 1 ; GISEL-NEXT: v_mov_b32_e32 v0, 0x86 ; GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GISEL-NEXT: v_and_b32_e32 v2, 0x7f, v4 ; GISEL-NEXT: v_cmp_ge_u64_e32 vcc, v[5:6], v[0:1] -; GISEL-NEXT: v_or_b32_e32 v7, 0x80, v2 -; GISEL-NEXT: v_mov_b32_e32 v8, 0 +; GISEL-NEXT: v_mov_b32_e32 v7, 0 +; GISEL-NEXT: v_or_b32_e32 v6, 0x80, v2 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3 ; GISEL-NEXT: s_and_saveexec_b64 s[6:7], vcc ; GISEL-NEXT: s_xor_b64 s[16:17], exec, s[6:7] ; GISEL-NEXT: s_cbranch_execz .LBB7_4 ; GISEL-NEXT: ; %bb.3: ; %fp-to-i-if-else -; GISEL-NEXT: v_add_u32_e32 v6, 0xffffff7a, v5 -; GISEL-NEXT: v_lshlrev_b64 v[0:1], v6, v[7:8] -; GISEL-NEXT: v_add_u32_e32 v4, 0xffffff3a, v5 -; GISEL-NEXT: v_sub_u32_e32 v2, 64, v6 -; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v6 -; GISEL-NEXT: v_lshl_or_b32 v11, v11, 16, v11 -; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[7:8] -; GISEL-NEXT: v_lshlrev_b64 v[4:5], v4, v[7:8] -; GISEL-NEXT: v_cndmask_b32_e32 v8, 0, v0, vcc +; GISEL-NEXT: v_add_u32_e32 v10, 0xffffff7a, v5 +; GISEL-NEXT: v_lshlrev_b64 v[0:1], v10, v[6:7] +; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v10 +; GISEL-NEXT: v_cndmask_b32_e32 v11, 0, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v1, vcc -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v8, v11, 0 -; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v6 +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v11, v9, 0 +; GISEL-NEXT: v_add_u32_e32 v4, 0xffffff3a, v5 +; GISEL-NEXT: v_sub_u32_e32 v2, 64, v10 +; GISEL-NEXT: v_lshrrev_b64 v[2:3], v2, v[6:7] +; GISEL-NEXT: v_lshlrev_b64 v[4:5], v4, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v12, v10, v[0:1] -; GISEL-NEXT: v_cndmask_b32_e64 v13, v2, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v8, v9, 0 -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v9, v[6:7] -; GISEL-NEXT: v_mul_lo_u32 v4, v12, v11 -; GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc +; GISEL-NEXT: v_cmp_eq_u32_e64 s[6:7], 0, v10 +; GISEL-NEXT: v_cndmask_b32_e64 v10, v2, 0, s[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v10, v8, v[6:7] +; GISEL-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0 ; GISEL-NEXT: v_mov_b32_e32 v2, v6 -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v8, v10, v[1:2] -; GISEL-NEXT: v_mul_lo_u32 v6, v8, v11 -; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] -; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v9, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v6, v11, v9 +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2] +; GISEL-NEXT: v_mul_lo_u32 v4, v12, v9 +; 
GISEL-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc +; GISEL-NEXT: v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2] ; GISEL-NEXT: v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11] ; GISEL-NEXT: v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9] -; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v13, v10, v[4:5] +; GISEL-NEXT: v_mad_u64_u32 v[6:7], s[8:9], v10, v9, v[4:5] +; GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[6:7] ; GISEL-NEXT: ; implicit-def: $vgpr5 -; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7] -; GISEL-NEXT: ; implicit-def: $vgpr7_vgpr8 -; GISEL-NEXT: ; implicit-def: $vgpr9 +; GISEL-NEXT: v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7] +; GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7 +; GISEL-NEXT: ; implicit-def: $vgpr8 ; GISEL-NEXT: .LBB7_4: ; %Flow ; GISEL-NEXT: s_andn2_saveexec_b64 s[6:7], s[16:17] ; GISEL-NEXT: s_cbranch_execz .LBB7_6 ; GISEL-NEXT: ; %bb.5: ; %fp-to-i-if-then12 ; GISEL-NEXT: v_sub_co_u32_e32 v2, vcc, 0x86, v5 -; GISEL-NEXT: v_lshrrev_b64 v[0:1], v2, v[7:8] +; GISEL-NEXT: v_lshrrev_b64 v[0:1], v2, v[6:7] ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v2 ; GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2 -; GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc -; GISEL-NEXT: v_mul_hi_i32_i24_e32 v1, v0, v9 +; GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc +; GISEL-NEXT: v_mul_hi_i32_i24_e32 v1, v0, v8 ; GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v1 -; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v9 +; GISEL-NEXT: v_mul_i32_i24_e32 v0, v0, v8 ; GISEL-NEXT: v_mov_b32_e32 v3, v2 ; GISEL-NEXT: .LBB7_6: ; %Flow1 ; GISEL-NEXT: s_or_b64 exec, exec, s[6:7] diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll index d1403b6c1a01d..41cbbe57d7a36 100644 --- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll +++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll @@ -238,7 +238,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in) ; VI-SAFE-GISEL-NEXT: s_cmp_gt_i32 s6, 5 ; VI-SAFE-GISEL-NEXT: s_cselect_b32 s6, 1, 0 ; VI-SAFE-GISEL-NEXT: s_or_b32 s6, s7, s6 -; VI-SAFE-GISEL-NEXT: s_and_b32 s6, s6, 1 ; VI-SAFE-GISEL-NEXT: s_add_i32 s2, s2, s6 ; VI-SAFE-GISEL-NEXT: s_cmp_gt_i32 s4, 30 ; VI-SAFE-GISEL-NEXT: s_cselect_b32 s2, 0x7c00, s2 @@ -364,7 +363,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in) ; GFX10-SAFE-GISEL-NEXT: s_cmp_gt_i32 s6, 5 ; GFX10-SAFE-GISEL-NEXT: s_cselect_b32 s6, 1, 0 ; GFX10-SAFE-GISEL-NEXT: s_or_b32 s6, s7, s6 -; GFX10-SAFE-GISEL-NEXT: s_and_b32 s6, s6, 1 ; GFX10-SAFE-GISEL-NEXT: s_add_i32 s2, s2, s6 ; GFX10-SAFE-GISEL-NEXT: s_cmp_gt_i32 s4, 30 ; GFX10-SAFE-GISEL-NEXT: s_cselect_b32 s2, 0x7c00, s2 @@ -501,8 +499,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in) ; GFX11-SAFE-GISEL-NEXT: s_cselect_b32 s6, 1, 0 ; GFX11-SAFE-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX11-SAFE-GISEL-NEXT: s_or_b32 s6, s7, s6 -; GFX11-SAFE-GISEL-NEXT: s_and_b32 s6, s6, 1 -; GFX11-SAFE-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-SAFE-GISEL-NEXT: s_add_i32 s2, s2, s6 ; GFX11-SAFE-GISEL-NEXT: s_cmp_gt_i32 s4, 30 ; GFX11-SAFE-GISEL-NEXT: s_cselect_b32 s2, 0x7c00, s2 diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll index 94475e97b2e40..f096c1752b84a 100644 --- a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll +++ b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll @@ -4602,13 +4602,21 @@ define amdgpu_ps float @global_load_saddr_i8_offset_or_i64_imm_offset_16(ptr add ; 
GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: ; return to shader part epilog ; -; GFX12-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_16: -; GFX12: ; %bb.0: -; GFX12-NEXT: v_or_b32_e32 v0, 16, v0 -; GFX12-NEXT: v_mov_b32_e32 v1, 0 -; GFX12-NEXT: global_load_u8 v0, v[0:1], off -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-SDAG-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_16: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: v_or_b32_e32 v0, 16, v0 +; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-SDAG-NEXT: global_load_u8 v0, v[0:1], off +; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_16: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-GISEL-NEXT: v_or_b32_e32 v0, 16, v0 +; GFX12-GISEL-NEXT: global_load_u8 v0, v[0:1], off +; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-NEXT: ; return to shader part epilog %zext.idx = zext i32 %idx to i64 %or = or i64 %zext.idx, 16 %addr = inttoptr i64 %or to ptr addrspace(1) @@ -4635,13 +4643,21 @@ define amdgpu_ps float @global_load_saddr_i8_offset_or_i64_imm_offset_4160(ptr a ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: ; return to shader part epilog ; -; GFX12-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_4160: -; GFX12: ; %bb.0: -; GFX12-NEXT: v_or_b32_e32 v0, 0x1040, v0 -; GFX12-NEXT: v_mov_b32_e32 v1, 0 -; GFX12-NEXT: global_load_u8 v0, v[0:1], off -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-SDAG-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_4160: +; GFX12-SDAG: ; %bb.0: +; GFX12-SDAG-NEXT: v_or_b32_e32 v0, 0x1040, v0 +; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-SDAG-NEXT: global_load_u8 v0, v[0:1], off +; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-NEXT: ; return to shader part epilog +; +; GFX12-GISEL-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_4160: +; GFX12-GISEL: ; %bb.0: +; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-GISEL-NEXT: v_or_b32_e32 v0, 0x1040, v0 +; GFX12-GISEL-NEXT: global_load_u8 v0, v[0:1], off +; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-NEXT: ; return to shader part epilog %zext.idx = zext i32 %idx to i64 %or = or i64 %zext.idx, 4160 %addr = inttoptr i64 %or to ptr addrspace(1)