diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 3672a91e33a30..078825f2a9a22 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5127,7 +5127,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
 
     // If the sign portion ends in our element the subtraction gives correct
     // result. Otherwise it gives either negative or > bitwidth result
-    return std::clamp(KnownSign - rIndex * BitWidth, 0, BitWidth);
+    return std::clamp(KnownSign - rIndex * BitWidth, 1, BitWidth);
   }
   case ISD::INSERT_VECTOR_ELT: {
     if (VT.isScalableVector())
diff --git a/llvm/test/CodeGen/AMDGPU/pr155452.ll b/llvm/test/CodeGen/AMDGPU/pr155452.ll
new file mode 100644
index 0000000000000..d021b210c7f3a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/pr155452.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc %s -march=amdgcn -o - | FileCheck %s
+
+target triple = "amdgcn-amd-amdhsa"
+
+define amdgpu_kernel void @my_kernel(i64 %foo, i32 %bar) {
+; CHECK-LABEL: my_kernel:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; CHECK-NEXT:    s_add_i32 s12, s12, s17
+; CHECK-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-NEXT:    s_load_dword s0, s[8:9], 0x2
+; CHECK-NEXT:    s_load_dwordx2 s[2:3], s[8:9], 0x0
+; CHECK-NEXT:    s_mov_b64 s[4:5], 1
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_ashr_i32 s6, s0, 31
+; CHECK-NEXT:    s_abs_i32 s7, s0
+; CHECK-NEXT:    v_cvt_f32_u32_e32 v2, s7
+; CHECK-NEXT:    s_sub_i32 s0, 0, s7
+; CHECK-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; CHECK-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; CHECK-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; CHECK-NEXT:    v_mul_lo_u32 v3, s0, v2
+; CHECK-NEXT:    v_mul_hi_u32 v3, v2, v3
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; CHECK-NEXT:    s_and_b64 s[0:1], exec, -1
+; CHECK-NEXT:  .LBB0_1: ; %loop
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    v_mov_b32_e32 v3, s4
+; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[8:9], s2, v3, 1
+; CHECK-NEXT:    s_mul_i32 s4, s3, s4
+; CHECK-NEXT:    s_mul_i32 s5, s2, s5
+; CHECK-NEXT:    v_add_i32_e32 v4, vcc, s4, v4
+; CHECK-NEXT:    v_readfirstlane_b32 s4, v3
+; CHECK-NEXT:    v_add_i32_e32 v4, vcc, s5, v4
+; CHECK-NEXT:    s_ashr_i32 s5, s4, 31
+; CHECK-NEXT:    s_abs_i32 s8, s4
+; CHECK-NEXT:    s_xor_b32 s5, s5, s6
+; CHECK-NEXT:    v_mul_hi_u32 v3, s8, v2
+; CHECK-NEXT:    v_readfirstlane_b32 s9, v3
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, 1, v3
+; CHECK-NEXT:    s_mul_i32 s9, s9, s7
+; CHECK-NEXT:    s_sub_i32 s8, s8, s9
+; CHECK-NEXT:    s_sub_i32 s9, s8, s7
+; CHECK-NEXT:    s_cmp_ge_u32 s8, s7
+; CHECK-NEXT:    s_cselect_b64 vcc, -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; CHECK-NEXT:    s_cselect_b32 s8, s9, s8
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, 1, v3
+; CHECK-NEXT:    s_cmp_ge_u32 s8, s7
+; CHECK-NEXT:    s_cselect_b64 vcc, -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
+; CHECK-NEXT:    v_xor_b32_e32 v3, s5, v3
+; CHECK-NEXT:    v_subrev_i32_e32 v3, vcc, s5, v3
+; CHECK-NEXT:    v_ashrrev_i32_e32 v5, 31, v3
+; CHECK-NEXT:    v_or_b32_e32 v3, s4, v3
+; CHECK-NEXT:    v_or_b32_e32 v4, v4, v5
+; CHECK-NEXT:    flat_load_dwordx2 v[3:4], v[3:4]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[3:4]
+; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    s_mov_b64 vcc, s[0:1]
+; CHECK-NEXT:    s_cbranch_vccnz .LBB0_1
+; CHECK-NEXT:  ; %bb.2: ; %DummyReturnBlock
+; CHECK-NEXT:    s_endpgm
+entry:
+  br label %loop
+
+loop: ; preds = %entry, %loop
+  %i = phi i64 [ 1, %entry ], [ 0, %loop ]
+  %mul = mul i64 %foo, %i
+  %add = add i64 %mul, 1
+  %trunc = trunc i64 %add to i32
+  %div = sdiv i32 %trunc, %bar
+  %sext = sext i32 %div to i64
+  %or = or i64 %add, %sext
+  %inttoptr = inttoptr i64 %or to ptr
+  %addrspacecast = addrspacecast ptr %inttoptr to ptr addrspace(1)
+  %val = load double, ptr addrspace(1) %addrspacecast, align 8
+  store double %val, ptr addrspace(1) null, align 8
+  br label %loop
+}
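
Note on the one-line functional change: ComputeNumSignBits returns the number of high bits known to equal the sign bit, and by definition that count is never less than 1, since the sign bit is always a copy of itself. With a lower clamp bound of 0, the expression KnownSign - rIndex * BitWidth could report an impossible 0 sign bits whenever the known sign portion of the wide source does not reach into the extracted element, violating the >= 1 invariant that callers rely on; the AMDGPU regression test above guards against the resulting mis-fold. Below is a minimal standalone sketch of the before/after behaviour; it is plain C++ with hypothetical input values, not LLVM-internal code:

// before_after.cpp: models the clamp expression from the patch above.
// KnownSign, rIndex, and BitWidth are hypothetical example values.
#include <algorithm>
#include <cassert>
#include <iostream>

// Mirrors the patched expression: sign bits of element `rIndex` of a
// bitcast, where the wide source has `KnownSign` known sign bits and
// each narrow element is `BitWidth` bits wide.
int numSignBitsOfElement(int KnownSign, int rIndex, int BitWidth) {
  return std::clamp(KnownSign - rIndex * BitWidth, 1, BitWidth);
}

int main() {
  // A 64-bit source with 32 known sign bits, split into 32-bit elements:
  // for the element just below the sign portion, 32 - 1 * 32 == 0.
  int Old = std::clamp(32 - 1 * 32, 0, 32);  // old bound: yields 0, an invalid count
  int New = numSignBitsOfElement(32, 1, 32); // new bound: yields 1, the legal minimum
  std::cout << "old = " << Old << ", new = " << New << '\n';
  assert(New >= 1 && "every value has at least one sign bit");
  return 0;
}

Returning 1 here is not only legal but also the right conservative answer: when nothing is known about an element's upper bits, "at least the sign bit itself" is the weakest claim ComputeNumSignBits can make.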