Commit 042fad4

[RISCV] Unify vsetvli compatibility logic in forward and backwards passes
The backwards local postpass has its own logic for figuring out if two vsetvlis are compatible, separate from the isCompatible used by the forward pass. However, these largely compute the same thing, i.e. whether it is possible to mutate vsetvli A into vsetvli B given the demanded properties. The main difference between the two is that the backwards postpass needs to be able to reason about vsetvli x0, x0, whereas the forward pass doesn't (because regular vector pseudos can't express this).

So if we teach the VSETVLIInfo used by the forward pass to handle the vsetvli x0, x0 case, it becomes possible to unify the two passes. To do this we introduce a new state to represent that the VL is preserved from the previous vsetvli. Then in VSETVLIInfo::isCompatible we can use this information to avoid checking whether the AVLs are the same when we know the second vsetvli is x0, x0.

For the backwards pass, we keep a running VSETVLIInfo as we iterate up through the basic block, and swap out canMutatePriorConfig for VSETVLIInfo::isCompatible.

It's now possible to move areCompatibleVTYPEs into VSETVLIInfo as well, but I've deferred that code motion to keep this diff small; it can be done afterwards as an NFC.
1 parent 76b53a0 commit 042fad4
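
For readers less familiar with the instruction, `vsetvli x0, x0, <vtype>` is the VL-preserving form: it neither reads an AVL register nor writes a VL result, so it changes VTYPE while keeping the current VL. A minimal sketch of how that form can be recognized, modelled on the pass's existing isVLPreservingConfig helper and assuming the usual rd, rs1, vtypei operand layout of the vsetvli pseudos:

    // Sketch: a vsetvli is VL-preserving when both its VL result (operand 0)
    // and its AVL source (operand 1) are the x0 register.
    static bool isVLPreserving(const MachineInstr &MI) {
      return MI.getOperand(0).getReg() == RISCV::X0 &&
             MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0;
    }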

4 files changed: +63 -90 lines changed

llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp

Lines changed: 49 additions & 60 deletions
@@ -419,6 +419,17 @@ DemandedFields getDemanded(const MachineInstr &MI,
   return Res;
 }
 
+static MachineInstr *isADDIX0(Register Reg, const MachineRegisterInfo &MRI) {
+  if (Reg == RISCV::X0)
+    return nullptr;
+  if (MachineInstr *MI = MRI.getVRegDef(Reg);
+      MI && MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
+      MI->getOperand(2).isImm() && MI->getOperand(1).getReg() == RISCV::X0 &&
+      MI->getOperand(2).getImm() != 0)
+    return MI;
+  return nullptr;
+}
+
 /// Defines the abstract state with which the forward dataflow models the
 /// values of the VL and VTYPE registers after insertion.
 class VSETVLIInfo {
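
A note on the pattern the new isADDIX0 helper matches: a constant AVL such as 32 is materialized by `li a3, 32`, an alias for `addi a3, x0, 32`, so a defining ADDI with an x0 source and a non-zero immediate both proves the AVL is non-zero and gives the postpass an instruction it can relocate. A hedged usage sketch (EarlierVSETVLI is a hypothetical insertion point):

    // If the AVL vreg is defined by "addi rd, x0, nonzero-imm", the returned
    // ADDI proves the AVL is non-zero and can be hoisted before an earlier
    // vsetvli that takes over this AVL (see doLocalPostpass below).
    if (MachineInstr *ADDI = isADDIX0(AVLReg, MRI))
      ADDI->moveBefore(&EarlierVSETVLI);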
@@ -431,6 +442,7 @@ class VSETVLIInfo {
     Uninitialized,
     AVLIsReg,
     AVLIsImm,
+    PreserveVL, // vsetvli x0, x0
     Unknown,
   } State = Uninitialized;
 
@@ -466,6 +478,8 @@ class VSETVLIInfo {
     State = AVLIsImm;
   }
 
+  void setPreserveVL() { State = PreserveVL; }
+
   bool hasAVLImm() const { return State == AVLIsImm; }
   bool hasAVLReg() const { return State == AVLIsReg; }
   Register getAVLReg() const {
@@ -486,11 +500,7 @@
     if (hasAVLReg()) {
       if (getAVLReg() == RISCV::X0)
         return true;
-      if (MachineInstr *MI = MRI.getVRegDef(getAVLReg());
-          MI && MI->getOpcode() == RISCV::ADDI &&
-          MI->getOperand(1).isReg() && MI->getOperand(2).isImm() &&
-          MI->getOperand(1).getReg() == RISCV::X0 &&
-          MI->getOperand(2).getImm() != 0)
+      if (isADDIX0(getAVLReg(), MRI))
         return true;
       return false;
     }
@@ -579,8 +589,11 @@ class VSETVLIInfo {
   // Determine whether the vector instructions requirements represented by
   // Require are compatible with the previous vsetvli instruction represented
   // by this. MI is the instruction whose requirements we're considering.
+  // The instruction represented by Require should come after this, unless
+  // OrderReversed is true.
   bool isCompatible(const DemandedFields &Used, const VSETVLIInfo &Require,
-                    const MachineRegisterInfo &MRI) const {
+                    const MachineRegisterInfo &MRI,
+                    bool OrderReversed = false) const {
     assert(isValid() && Require.isValid() &&
            "Can't compare invalid VSETVLIInfos");
     assert(!Require.SEWLMULRatioOnly &&
@@ -593,11 +606,15 @@
     if (SEWLMULRatioOnly)
       return false;
 
-    if (Used.VLAny && !hasSameAVL(Require))
-      return false;
+    // If the VL will be preserved, then we don't need to check the AVL.
+    const uint8_t EndState = OrderReversed ? State : Require.State;
+    if (EndState != PreserveVL) {
+      if (Used.VLAny && !hasSameAVL(Require))
+        return false;
 
-    if (Used.VLZeroness && !hasEquallyZeroAVL(Require, MRI))
-      return false;
+      if (Used.VLZeroness && !hasEquallyZeroAVL(Require, MRI))
+        return false;
+    }
 
     return hasCompatibleVTYPE(Used, Require);
   }
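
To make the OrderReversed short-circuit concrete outside the pass, here is a standalone model with simplified types (not the LLVM API): whichever vsetvli ends up in effect determines the final VL, so when that one is VL-preserving the AVL comparison can be skipped.

    #include <cstdint>

    enum State : uint8_t { AVLIsReg, AVLIsImm, PreserveVL };
    struct Info { State S; int64_t AVL; uint16_t VType; };
    struct Demanded { bool VLAny; bool VType; };

    bool compatible(const Info &Prev, const Info &Next, Demanded Used,
                    bool OrderReversed = false) {
      // In the backwards postpass the roles of Prev/Next are swapped, so
      // pick the state of whichever config ends up in effect.
      State End = OrderReversed ? Prev.S : Next.S;
      if (End != PreserveVL && Used.VLAny &&
          (Prev.S != Next.S || Prev.AVL != Next.AVL))
        return false;
      return !Used.VType || Prev.VType == Next.VType;
    }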
@@ -849,9 +866,11 @@ static VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) {
     assert(MI.getOpcode() == RISCV::PseudoVSETVLI ||
            MI.getOpcode() == RISCV::PseudoVSETVLIX0);
     Register AVLReg = MI.getOperand(1).getReg();
-    assert((AVLReg != RISCV::X0 || MI.getOperand(0).getReg() != RISCV::X0) &&
-           "Can't handle X0, X0 vsetvli yet");
-    NewInfo.setAVLReg(AVLReg);
+
+    if (AVLReg == RISCV::X0 && MI.getOperand(0).getReg() == RISCV::X0)
+      NewInfo.setPreserveVL();
+    else
+      NewInfo.setAVLReg(AVLReg);
   }
   NewInfo.setVTYPE(MI.getOperand(2).getImm());
 
@@ -1426,52 +1445,9 @@ static void doUnion(DemandedFields &A, DemandedFields B) {
   A.MaskPolicy |= B.MaskPolicy;
 }
 
-static bool isNonZeroAVL(const MachineOperand &MO) {
-  if (MO.isReg())
-    return RISCV::X0 == MO.getReg();
-  assert(MO.isImm());
-  return 0 != MO.getImm();
-}
-
-// Return true if we can mutate PrevMI to match MI without changing any the
-// fields which would be observed.
-static bool canMutatePriorConfig(const MachineInstr &PrevMI,
-                                 const MachineInstr &MI,
-                                 const DemandedFields &Used) {
-  // If the VL values aren't equal, return false if either a) the former is
-  // demanded, or b) we can't rewrite the former to be the later for
-  // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
-    if (Used.VLAny)
-      return false;
-
-    // We don't bother to handle the equally zero case here as it's largely
-    // uninteresting.
-    if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
-        return false;
-      if (!isNonZeroAVL(MI.getOperand(1)) ||
-          !isNonZeroAVL(PrevMI.getOperand(1)))
-        return false;
-    }
-
-    // TODO: Track whether the register is defined between
-    // PrevMI and MI.
-    if (MI.getOperand(1).isReg() &&
-        RISCV::X0 != MI.getOperand(1).getReg())
-      return false;
-  }
-
-  if (!PrevMI.getOperand(2).isImm() || !MI.getOperand(2).isImm())
-    return false;
-
-  auto PriorVType = PrevMI.getOperand(2).getImm();
-  auto VType = MI.getOperand(2).getImm();
-  return areCompatibleVTYPEs(PriorVType, VType, Used);
-}
-
 void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
   MachineInstr *NextMI = nullptr;
+  VSETVLIInfo NextInfo;
   // We can have arbitrary code in successors, so VL and VTYPE
   // must be considered demanded.
   DemandedFields Used;
@@ -1482,6 +1458,7 @@ void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
 
     if (!isVectorConfigInstr(MI)) {
       doUnion(Used, getDemanded(MI, MRI, ST));
+      transferAfter(NextInfo, MI);
      continue;
    }
 
@@ -1495,14 +1472,25 @@
       ToDelete.push_back(&MI);
       // Leave NextMI unchanged
       continue;
-    } else if (canMutatePriorConfig(MI, *NextMI, Used)) {
+    } else if (NextInfo.isCompatible(Used, getInfoForVSETVLI(MI), *MRI,
+                                     true)) {
       if (!isVLPreservingConfig(*NextMI)) {
         MI.getOperand(0).setReg(NextMI->getOperand(0).getReg());
         MI.getOperand(0).setIsDead(false);
         if (NextMI->getOperand(1).isImm())
           MI.getOperand(1).ChangeToImmediate(NextMI->getOperand(1).getImm());
-        else
-          MI.getOperand(1).ChangeToRegister(NextMI->getOperand(1).getReg(), false);
+        else {
+          // NextMI may have an AVL (addi x0, imm) whilst MI might have a
+          // different non-zero AVL. But the AVLs may be considered
+          // compatible. So hoist it up to MI in case it's not already
+          // dominated by it. See hasNonZeroAVL.
+          if (MachineInstr *ADDI =
+                  isADDIX0(NextMI->getOperand(1).getReg(), *MRI))
+            ADDI->moveBefore(&MI);
+
+          MI.getOperand(1).ChangeToRegister(NextMI->getOperand(1).getReg(),
+                                            false);
+        }
         MI.setDesc(NextMI->getDesc());
       }
       MI.getOperand(2).setImm(NextMI->getOperand(2).getImm());
@@ -1511,6 +1499,7 @@
       }
     }
     NextMI = &MI;
+    NextInfo = getInfoForVSETVLI(MI);
     Used = getDemanded(MI, MRI, ST);
   }
 
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll

Lines changed: 4 additions & 7 deletions
@@ -51,11 +51,10 @@ define <32 x i32> @insertelt_v32i32_0(<32 x i32> %a, i32 %y) {
 define <32 x i32> @insertelt_v32i32_4(<32 x i32> %a, i32 %y) {
 ; CHECK-LABEL: insertelt_v32i32_4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 4
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    ret
   %b = insertelement <32 x i32> %a, i32 %y, i32 4
   ret <32 x i32> %b
@@ -65,9 +64,8 @@ define <32 x i32> @insertelt_v32i32_31(<32 x i32> %a, i32 %y) {
 ; CHECK-LABEL: insertelt_v32i32_31:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 31
 ; CHECK-NEXT:    ret
   %b = insertelement <32 x i32> %a, i32 %y, i32 31
@@ -103,9 +101,8 @@ define <64 x i32> @insertelt_v64i32_63(<64 x i32> %a, i32 %y) {
 ; CHECK-LABEL: insertelt_v64i32_63:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v24, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v24, a0
 ; CHECK-NEXT:    vslideup.vi v16, v24, 31
 ; CHECK-NEXT:    ret
   %b = insertelement <64 x i32> %a, i32 %y, i32 63

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll

Lines changed: 8 additions & 21 deletions
@@ -12418,12 +12418,11 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    andi a2, a1, 1
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB98_2
 ; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
-; RV64ZVE32F-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    li a2, 32
+; RV64ZVE32F-NEXT:    vsetvli zero, a2, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
-; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, mf4, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v10, a2
 ; RV64ZVE32F-NEXT:  .LBB98_2: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
@@ -12452,14 +12451,12 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB98_8
 ; RV64ZVE32F-NEXT:  .LBB98_7: # %cond.load10
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 5, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 5, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT:  .LBB98_8: # %else11
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
@@ -12592,14 +12589,12 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    slli a2, a1, 43
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_32
 ; RV64ZVE32F-NEXT:  .LBB98_31: # %cond.load58
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 21, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 21, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 20
 ; RV64ZVE32F-NEXT:  .LBB98_32: # %else59
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
@@ -12742,14 +12737,12 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    andi a2, a1, 256
 ; RV64ZVE32F-NEXT:    beqz a2, .LBB98_13
 ; RV64ZVE32F-NEXT:  .LBB98_53: # %cond.load22
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 9, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v13, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 9, e8, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v13, 8
 ; RV64ZVE32F-NEXT:    andi a2, a1, 512
 ; RV64ZVE32F-NEXT:    bnez a2, .LBB98_14
@@ -12777,14 +12770,12 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    slli a2, a1, 47
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_26
 ; RV64ZVE32F-NEXT:  .LBB98_56: # %cond.load46
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 17, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 17, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 16
 ; RV64ZVE32F-NEXT:    slli a2, a1, 46
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB98_27
@@ -12835,14 +12826,12 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    slli a2, a1, 39
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_37
 ; RV64ZVE32F-NEXT:  .LBB98_61: # %cond.load70
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 25, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v8
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 25, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 24
 ; RV64ZVE32F-NEXT:    slli a2, a1, 38
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB98_38
@@ -12870,14 +12859,12 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    slli a2, a1, 35
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_42
 ; RV64ZVE32F-NEXT:  .LBB98_64: # %cond.load82
-; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT:    vsetivli zero, 29, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lbu a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
-; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
-; RV64ZVE32F-NEXT:    vsetivli zero, 29, e8, m2, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 28
 ; RV64ZVE32F-NEXT:    slli a2, a1, 34
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB98_43

llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll

Lines changed: 2 additions & 2 deletions
@@ -329,9 +329,9 @@ entry:
 define double @test17(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
 ; CHECK-LABEL: test17:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.f.s fa5, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vfmv.f.s fa5, v8
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa4, v8
 ; CHECK-NEXT:    fadd.d fa0, fa5, fa4
