Conversation

@paulwalker-arm (Collaborator)

Fixes #100497
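
For reference, the fold recognises that inserting a value into a vector that is already a splat of that same value leaves the vector unchanged, so the insr call can be replaced with its first operand. Below is a minimal sketch of the motivating source pattern from the linked issue, assuming the standard ACLE intrinsics from arm_sve.h; the function name dup_then_insr is illustrative only:

  #include <arm_sve.h>

  // svdup_n_s8 splats x across every lane. svinsr_n_s8 shifts the
  // elements up by one and inserts x at lane 0, so when the input is
  // already a splat of x the result is identical to the input and the
  // insr can be folded away.
  svint8_t dup_then_insr(int8_t x) {
    svint8_t splat = svdup_n_s8(x);
    return svinsr_n_s8(splat, x);
  }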

@llvmbot (Member) commented Sep 20, 2024

@llvm/pr-subscribers-llvm-transforms

@llvm/pr-subscribers-backend-aarch64

Author: Paul Walker (paulwalker-arm)

Changes

Fixes #100497


Full diff: https://github.com/llvm/llvm-project/pull/109445.diff

2 Files Affected:

  • (modified) llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp (+12)
  • (added) llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll (+73)
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 11a4aa4d01e123..a7b4a176b9f7cd 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2140,6 +2140,16 @@ static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC,
   return IC.replaceInstUsesWith(II, LSL);
 }
 
+static std::optional<Instruction *> instCombineSVEInsr(InstCombiner &IC,
+                                                       IntrinsicInst &II) {
+  Value *Vec = II.getOperand(0);
+
+  if (getSplatValue(Vec) == II.getOperand(1))
+    return IC.replaceInstUsesWith(II, Vec);
+
+  return std::nullopt;
+}
+
 std::optional<Instruction *>
 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
                                      IntrinsicInst &II) const {
@@ -2460,6 +2470,8 @@ AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
     return instCombineSVESrshl(IC, II);
   case Intrinsic::aarch64_sve_dupq_lane:
     return instCombineSVEDupqLane(IC, II);
+  case Intrinsic::aarch64_sve_insr:
+    return instCombineSVEInsr(IC, II);
   }
 
   return std::nullopt;
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
new file mode 100644
index 00000000000000..e8489c5be85c41
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 16 x i8> @insr_val_into_splatted_val_int(i8 %a) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_val_int(
+; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[A:%.*]], i64 0
+; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 16 x i8> [[T0]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[T1]]
+;
+  %t0 = insertelement <vscale x 16 x i8> poison, i8 %a, i64 0
+  %t1 = shufflevector <vscale x 16 x i8> %t0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %t2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %t1, i8 %a)
+  ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @insr_five_into_fives() #0 {
+; CHECK-LABEL: @insr_five_into_fives(
+; CHECK-NEXT:    ret <vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+;
+  %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> splat (i16 5), i16 5)
+  ret <vscale x 8 x i16> %t1
+}
+
+define <vscale x 4 x float> @insr_val_into_splatted_val_fp(float %a) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_val_fp(
+; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 4 x float> poison, float [[A:%.*]], i64 0
+; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 4 x float> [[T0]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 4 x float> [[T1]]
+;
+  %t0 = insertelement <vscale x 4 x float> poison, float %a, i64 0
+  %t1 = shufflevector <vscale x 4 x float> %t0, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %t2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float> %t1, float %a)
+  ret <vscale x 4 x float> %t2
+}
+
+define <vscale x 2 x double> @insr_zero_into_zero() #0 {
+; CHECK-LABEL: @insr_zero_into_zero(
+; CHECK-NEXT:    ret <vscale x 2 x double> zeroinitializer
+;
+  %t1 = tail call <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double> zeroinitializer, double zeroinitializer)
+  ret <vscale x 2 x double> %t1
+}
+
+define <vscale x 16 x i8> @insr_val_into_splatted_other(i8 %a, i8 %b) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_other(
+; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
+; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 16 x i8> [[T0]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT:    [[T2:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> [[T1]], i8 [[A:%.*]])
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[T2]]
+;
+  %t0 = insertelement <vscale x 16 x i8> poison, i8 %b, i64 0
+  %t1 = shufflevector <vscale x 16 x i8> %t0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %t2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %t1, i8 %a)
+  ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @insr_three_into_fives() #0 {
+; CHECK-LABEL: @insr_three_into_fives(
+; CHECK-NEXT:    [[T1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), i16 3)
+; CHECK-NEXT:    ret <vscale x 8 x i16> [[T1]]
+;
+  %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> splat (i16 5), i16 3)
+  ret <vscale x 8 x i16> %t1
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8>, i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16>, i16)
+declare <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float>, float)
+declare <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double>, double)
+
+attributes #0 = { "target-features"="+sve" }

@paulwalker-arm merged commit 622ae7f into llvm:main on Sep 24, 2024
@paulwalker-arm deleted the sve-insr-combine branch on Sep 24, 2024 at 14:11

Development

Successfully merging this pull request may close these issues.

  • aarch64 (SVE): svdup_s8 followed by svinsr of the same value could be optimized (#100497)
