@@ -12918,60 +12918,19 @@ define <4 x i32> @mgather_broadcast_load_unmasked2(ptr %base) {
 ; RV32-LABEL: mgather_broadcast_load_unmasked2:
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vluxei32.v v8, (zero), v8
+; RV32-NEXT: vlse32.v v8, (a0), zero
 ; RV32-NEXT: ret
 ;
 ; RV64V-LABEL: mgather_broadcast_load_unmasked2:
 ; RV64V: # %bb.0:
-; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64V-NEXT: vmv.v.x v10, a0
-; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64V-NEXT: vluxei64.v v8, (zero), v10
+; RV64V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64V-NEXT: vlse32.v v8, (a0), zero
 ; RV64V-NEXT: ret
 ;
 ; RV64ZVE32F-LABEL: mgather_broadcast_load_unmasked2:
 ; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmset.m v8
-; RV64ZVE32F-NEXT: vmv.x.s a1, v8
-; RV64ZVE32F-NEXT: # implicit-def: $v8
-; RV64ZVE32F-NEXT: beqz zero, .LBB100_5
-; RV64ZVE32F-NEXT: # %bb.1: # %else
-; RV64ZVE32F-NEXT: andi a2, a1, 2
-; RV64ZVE32F-NEXT: bnez a2, .LBB100_6
-; RV64ZVE32F-NEXT: .LBB100_2: # %else2
-; RV64ZVE32F-NEXT: andi a2, a1, 4
-; RV64ZVE32F-NEXT: bnez a2, .LBB100_7
-; RV64ZVE32F-NEXT: .LBB100_3: # %else5
-; RV64ZVE32F-NEXT: andi a1, a1, 8
-; RV64ZVE32F-NEXT: bnez a1, .LBB100_8
-; RV64ZVE32F-NEXT: .LBB100_4: # %else8
-; RV64ZVE32F-NEXT: ret
-; RV64ZVE32F-NEXT: .LBB100_5: # %cond.load
 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT: vlse32.v v8, (a0), zero
-; RV64ZVE32F-NEXT: andi a2, a1, 2
-; RV64ZVE32F-NEXT: beqz a2, .LBB100_2
-; RV64ZVE32F-NEXT: .LBB100_6: # %cond.load1
-; RV64ZVE32F-NEXT: lw a2, 0(a0)
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a2
-; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT: andi a2, a1, 4
-; RV64ZVE32F-NEXT: beqz a2, .LBB100_3
-; RV64ZVE32F-NEXT: .LBB100_7: # %cond.load4
-; RV64ZVE32F-NEXT: lw a2, 0(a0)
-; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a2
-; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2
-; RV64ZVE32F-NEXT: andi a1, a1, 8
-; RV64ZVE32F-NEXT: beqz a1, .LBB100_4
-; RV64ZVE32F-NEXT: .LBB100_8: # %cond.load7
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
-; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a0
-; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT: ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
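For reference, the checks above exercise a gather in which every lane loads from the same base pointer under an all-true mask, which is why it folds to a single zero-stride vlse32.v broadcast load. Only the first two IR lines of the test body are visible in the diff context; assuming the usual shape of these tests, the full function likely reads roughly like the sketch below (the %ptrhead, %ptrs and %v names are illustrative, not taken from the file):

  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  ; splat the scalar base pointer into all four lanes (assumed names)
  %ptrhead = insertelement <4 x ptr> poison, ptr %base, i32 0
  %ptrs = shufflevector <4 x ptr> %ptrhead, <4 x ptr> poison, <4 x i32> zeroinitializer
  ; gather with all-true mask and identical addresses in every lane
  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
  ret <4 x i32> %v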